Add new files and directories
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- .gitignore +52 -0
- Dockerfile +1 -1
- LICENSE +674 -0
- README.md +427 -11
- args_manager.py +40 -0
- auth-example.json +6 -0
- build_launcher.py +26 -0
- css/style.css +220 -0
- entry_with_update.py +46 -0
- environment.yaml +7 -0
- experiments_expansion.py +8 -0
- experiments_face.py +7 -0
- experiments_interrogate.py +8 -0
- extras/BLIP/configs/bert_config.json +21 -0
- extras/BLIP/configs/caption_coco.yaml +33 -0
- extras/BLIP/configs/med_config.json +21 -0
- extras/BLIP/configs/nlvr.yaml +21 -0
- extras/BLIP/configs/nocaps.yaml +15 -0
- extras/BLIP/configs/pretrain.yaml +27 -0
- extras/BLIP/configs/retrieval_coco.yaml +34 -0
- extras/BLIP/configs/retrieval_flickr.yaml +34 -0
- extras/BLIP/configs/retrieval_msrvtt.yaml +12 -0
- extras/BLIP/configs/vqa.yaml +25 -0
- extras/BLIP/models/bert_tokenizer/config.json +23 -0
- extras/BLIP/models/bert_tokenizer/tokenizer.json +0 -0
- extras/BLIP/models/bert_tokenizer/tokenizer_config.json +3 -0
- extras/BLIP/models/bert_tokenizer/vocab.txt +0 -0
- extras/BLIP/models/blip.py +239 -0
- extras/BLIP/models/blip_itm.py +76 -0
- extras/BLIP/models/blip_nlvr.py +105 -0
- extras/BLIP/models/blip_pretrain.py +339 -0
- extras/BLIP/models/blip_retrieval.py +319 -0
- extras/BLIP/models/blip_vqa.py +186 -0
- extras/BLIP/models/med.py +955 -0
- extras/BLIP/models/nlvr_encoder.py +843 -0
- extras/BLIP/models/vit.py +308 -0
- extras/expansion.py +126 -0
- extras/face_crop.py +50 -0
- extras/facexlib/detection/__init__.py +31 -0
- extras/facexlib/detection/align_trans.py +219 -0
- extras/facexlib/detection/matlab_cp2tform.py +317 -0
- extras/facexlib/detection/retinaface.py +366 -0
- extras/facexlib/detection/retinaface_net.py +196 -0
- extras/facexlib/detection/retinaface_utils.py +421 -0
- extras/facexlib/parsing/__init__.py +24 -0
- extras/facexlib/parsing/bisenet.py +140 -0
- extras/facexlib/parsing/parsenet.py +194 -0
- extras/facexlib/parsing/resnet.py +69 -0
- extras/facexlib/utils/__init__.py +7 -0
- extras/facexlib/utils/face_restoration_helper.py +374 -0
.gitignore
ADDED
@@ -0,0 +1,52 @@
__pycache__
*.ckpt
*.safetensors
*.pth
*.pt
*.bin
*.patch
*.backup
*.corrupted
*.partial
*.onnx
sorted_styles.json
/input
/cache
/language/default.json
/test_imgs
config.txt
config_modification_tutorial.txt
user_path_config.txt
user_path_config-deprecated.txt
/modules/*.png
/repositories
/venv
/tmp
/ui-config.json
/outputs
/config.json
/log
/webui.settings.bat
/embeddings
/styles.csv
/params.txt
/styles.csv.bak
/webui-user.bat
/webui-user.sh
/interrogate
/user.css
/.idea
/notification.ogg
/notification.mp3
/SwinIR
/textual_inversion
.vscode
/extensions
/test/stdout.txt
/test/stderr.txt
/cache.json*
/config_states/
/node_modules
/package-lock.json
/.coverage*
/auth.json
Dockerfile
CHANGED
@@ -17,4 +17,4 @@ EXPOSE 80
 ENV NAME World

 # Run app.py when the container launches
-CMD ["python", "./
+CMD ["python", "./python entry_with_update.py --preset realistic"]
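Note on the changed line: in Docker's exec-form CMD, each JSON array element becomes a separate argv entry, so the quoted string above would be looked up as a single executable path rather than as a command with arguments. A minimal hedged sketch of how such a launch command is usually split (assuming the intent is to run entry_with_update.py with the realistic preset; this is not part of the commit):

    # hypothetical exec-form CMD: interpreter, script, and each flag as separate elements
    CMD ["python", "entry_with_update.py", "--preset", "realistic"]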
LICENSE
ADDED
@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

[The rest of the added file is the unmodified standard text of the GNU General Public License v3: the Preamble; Terms and Conditions sections 0 (Definitions) through 17 (Interpretation of Sections 15 and 16); "END OF TERMS AND CONDITIONS"; and "How to Apply These Terms to Your New Programs". Full text: <https://www.gnu.org/licenses/gpl-3.0.txt>.]
README.md
CHANGED
@@ -1,11 +1,427 @@
<div align=center>
<img src="https://github.com/lllyasviel/Fooocus/assets/19834515/483fb86d-c9a2-4c20-997c-46dafc124f25">

**Non-cherry-picked** random batch by just typing two words "forest elf",

without any parameter tweaking, without any strange prompt tags.

See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic).

In the entire open source community, only Fooocus can achieve this level of **non-cherry-picked** quality.

</div>


# Fooocus

Fooocus is image-generating software (based on [Gradio](https://www.gradio.app/)).

Fooocus is a rethinking of Stable Diffusion and Midjourney’s designs:

* Learned from Stable Diffusion, the software is offline, open source, and free.

* Learned from Midjourney, manual tweaking is not needed; users only need to focus on the prompts and images.

Fooocus has included and automated [lots of inner optimizations and quality improvements](#tech_list). Users can forget all those difficult technical parameters and just enjoy the interaction between human and computer to "explore new mediums of thought and expanding the imaginative powers of the human species" `[1]`.

Fooocus has simplified the installation. Between pressing "download" and generating the first image, the number of needed mouse clicks is strictly limited to fewer than 3. The minimal GPU memory requirement is 4GB (Nvidia).

`[1]` David Holz, 2019.

**Recently many fake websites appear on Google when you search for “Fooocus”. Do not trust them – this repository is the only official source of Fooocus.**

## [Installing Fooocus](#download)

# Moving from Midjourney to Fooocus

Using Fooocus is as easy as (probably easier than) Midjourney – but this does not mean we lack functionality. Below are the details.

| Midjourney | Fooocus |
| - | - |
| High-quality text-to-image without needing much prompt engineering or parameter tuning. <br> (Unknown method) | High-quality text-to-image without needing much prompt engineering or parameter tuning. <br> (Fooocus has an offline GPT-2 based prompt processing engine and lots of sampling improvements, so results are always beautiful no matter whether your prompt is as short as “house in garden” or as long as 1000 words) |
| V1 V2 V3 V4 | Input Image -> Upscale or Variation -> Vary (Subtle) / Vary (Strong) |
| U1 U2 U3 U4 | Input Image -> Upscale or Variation -> Upscale (1.5x) / Upscale (2x) |
| Inpaint / Up / Down / Left / Right (Pan) | Input Image -> Inpaint or Outpaint -> Inpaint / Up / Down / Left / Right <br> (Fooocus uses its own inpaint algorithm and inpaint models, so results are more satisfying than in other software that uses the standard SDXL inpaint method/model) |
| Image Prompt | Input Image -> Image Prompt <br> (Fooocus uses its own image prompt algorithm, so result quality and prompt understanding are more satisfying than in other software that uses standard SDXL methods like standard IP-Adapters or Revisions) |
| --style | Advanced -> Style |
| --stylize | Advanced -> Advanced -> Guidance |
| --niji | [Multiple launchers: "run.bat", "run_anime.bat", and "run_realistic.bat".](https://github.com/lllyasviel/Fooocus/discussions/679) <br> Fooocus supports SDXL models on Civitai <br> (You can search for “Civitai” if you do not know about it) |
| --quality | Advanced -> Quality |
| --repeat | Advanced -> Image Number |
| Multi Prompts (::) | Just use multiple lines of prompts |
| Prompt Weights | You can use "I am (happy:1.5)". <br> Fooocus uses A1111's reweighting algorithm, so results are better than ComfyUI's if users directly copy prompts from Civitai. (If prompts are written with ComfyUI's reweighting, users are less likely to copy the prompt texts, as they prefer dragging files.) <br> To use an embedding, you can write "(embedding:file_name:1.1)"; see the example after this table. |
| --no | Advanced -> Negative Prompt |
| --ar | Advanced -> Aspect Ratios |
| InsightFace | Input Image -> Image Prompt -> Advanced -> FaceSwap |
| Describe | Input Image -> Describe |
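
As an illustration of the weighting and embedding syntax from the "Prompt Weights" row, here is a hypothetical prompt (the embedding file name and the weights are made up for this example):

    forest elf in a garden, (glowing eyes:1.5), (embedding:my_style_embedding:1.1)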
57 |
+
|
58 |
+
We also have a few things borrowed from the best parts of LeonardoAI:
|
59 |
+
|
60 |
+
| LeonardoAI | Fooocus |
|
61 |
+
| - | - |
|
62 |
+
| Prompt Magic | Advanced -> Style -> Fooocus V2 |
|
63 |
+
| Advanced Sampler Parameters (like Contrast/Sharpness/etc) | Advanced -> Advanced -> Sampling Sharpness / etc |
|
64 |
+
| User-friendly ControlNets | Input Image -> Image Prompt -> Advanced |
|
65 |
+
|
66 |
+
Fooocus also developed many "fooocus-only" features for advanced users to get perfect results. [Click here to browse the advanced features.](https://github.com/lllyasviel/Fooocus/discussions/117)
|
67 |
+
|
68 |
+
# Download
|
69 |
+
|
70 |
+
### Windows
|
71 |
+
|
72 |
+
You can directly download Fooocus with:
|
73 |
+
|
74 |
+
**[>>> Click here to download <<<](https://github.com/lllyasviel/Fooocus/releases/download/release/Fooocus_win64_2-1-831.7z)**
|
75 |
+
|
76 |
+
After you download the file, please uncompress it and then run the "run.bat".
|
77 |
+
|
78 |
+
![image](https://github.com/lllyasviel/Fooocus/assets/19834515/c49269c4-c274-4893-b368-047c401cc58c)
|
79 |
+
|
80 |
+
The first time you launch the software, it will automatically download models:
|
81 |
+
|
82 |
+
1. It will download [default models](#models) to the folder "Fooocus\models\checkpoints" given different presets. You can download them in advance if you do not want automatic download.
|
83 |
+
2. Note that if you use inpaint, at the first time you inpaint an image, it will download [Fooocus's own inpaint control model from here](https://huggingface.co/lllyasviel/fooocus_inpaint/resolve/main/inpaint_v26.fooocus.patch) as the file "Fooocus\models\inpaint\inpaint_v26.fooocus.patch" (the size of this file is 1.28GB).
|
84 |
+
|
85 |
+
After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679).
|
86 |
+
|
87 |
+
![image](https://github.com/lllyasviel/Fooocus/assets/19834515/d386f817-4bd7-490c-ad89-c1e228c23447)
|
88 |
+
|
89 |
+
If you already have these files, you can copy them to the above locations to speed up installation.
|
90 |
+
|
91 |
+
Note that if you see **"MetadataIncompleteBuffer" or "PytorchStreamReader"**, then your model files are corrupted. Please download models again.
|
92 |
+
|
93 |
+
Below is a test on a relatively low-end laptop with **16GB System RAM** and **6GB VRAM** (Nvidia 3060 laptop). The speed on this machine is about 1.35 seconds per iteration. Pretty impressive – nowadays laptops with 3060 are usually at very acceptable price.
|
94 |
+
|
95 |
+
![image](https://github.com/lllyasviel/Fooocus/assets/19834515/938737a5-b105-4f19-b051-81356cb7c495)
|
96 |
+
|
97 |
+
Besides, recently many other software report that Nvidia driver above 532 is sometimes 10x slower than Nvidia driver 531. If your generation time is very long, consider download [Nvidia Driver 531 Laptop](https://www.nvidia.com/download/driverResults.aspx/199991/en-us/) or [Nvidia Driver 531 Desktop](https://www.nvidia.com/download/driverResults.aspx/199990/en-us/).
|
98 |
+
|
99 |
+
Note that the minimal requirement is **4GB Nvidia GPU memory (4GB VRAM)** and **8GB system memory (8GB RAM)**. This requires using Microsoft’s Virtual Swap technique, which is automatically enabled by your Windows installation in most cases, so you often do not need to do anything about it. However, if you are not sure, or if you manually turned it off (would anyone really do that?), or **if you see any "RuntimeError: CPUAllocator"**, you can enable it here:
|
100 |
+
|
101 |
+
<details>
|
102 |
+
<summary>Click here to see the image instructions. </summary>
|
103 |
+
|
104 |
+
![image](https://github.com/lllyasviel/Fooocus/assets/19834515/2a06b130-fe9b-4504-94f1-2763be4476e9)
|
105 |
+
|
106 |
+
**And make sure that you have at least 40GB free space on each drive if you still see "RuntimeError: CPUAllocator" !**
|
107 |
+
|
108 |
+
</details>
|
109 |
+
|
110 |
+
Please open an issue if you use similar devices but still cannot achieve acceptable performance.
|
111 |
+
|
112 |
+
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
|
113 |
+
|
114 |
+
See also the common problems and troubleshoots [here](troubleshoot.md).
|
115 |
+
|
116 |
+
### Colab
|
117 |
+
|
118 |
+
(Last tested - 2023 Dec 12)
|
119 |
+
|
120 |
+
| Colab | Info |
|
121 |
+
| --- | --- |
|
122 |
+
| [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official |
|
123 |
+
|
124 |
+
In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition.
|
125 |
+
|
126 |
+
Note that this Colab will disable the refiner by default because free-tier Colab's resources are relatively limited (and some "big" features like Image Prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image always works on free-tier Colab.
|
127 |
+
|
128 |
+
Thanks to [camenduru](https://github.com/camenduru)!
|
129 |
+
|
130 |
+
### Linux (Using Anaconda)
|
131 |
+
|
132 |
+
If you want to use Anaconda/Miniconda, you can
|
133 |
+
|
134 |
+
git clone https://github.com/lllyasviel/Fooocus.git
|
135 |
+
cd Fooocus
|
136 |
+
conda env create -f environment.yaml
|
137 |
+
conda activate fooocus
|
138 |
+
pip install -r requirements_versions.txt
|
139 |
+
|
140 |
+
Then download the models: download [default models](#models) to the folder "Fooocus/models/checkpoints". **Or let Fooocus automatically download the models** using the launcher:
|
141 |
+
|
142 |
+
conda activate fooocus
|
143 |
+
python entry_with_update.py
|
144 |
+
|
145 |
+
Or, if you want to open a remote port, use
|
146 |
+
|
147 |
+
conda activate fooocus
|
148 |
+
python entry_with_update.py --listen
|
149 |
+
|
150 |
+
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
|
151 |
+
|
152 |
+
### Linux (Using Python Venv)
|
153 |
+
|
154 |
+
Your Linux needs to have **Python 3.10** installed. Assuming your Python can be called with the command **python3** and your venv system works, you can
|
155 |
+
|
156 |
+
git clone https://github.com/lllyasviel/Fooocus.git
|
157 |
+
cd Fooocus
|
158 |
+
python3 -m venv fooocus_env
|
159 |
+
source fooocus_env/bin/activate
|
160 |
+
pip install -r requirements_versions.txt
|
161 |
+
|
162 |
+
See the above sections for model downloads. You can launch the software with:
|
163 |
+
|
164 |
+
source fooocus_env/bin/activate
|
165 |
+
python entry_with_update.py
|
166 |
+
|
167 |
+
Or, if you want to open a remote port, use
|
168 |
+
|
169 |
+
source fooocus_env/bin/activate
|
170 |
+
python entry_with_update.py --listen
|
171 |
+
|
172 |
+
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
|
173 |
+
|
174 |
+
### Linux (Using native system Python)
|
175 |
+
|
176 |
+
If you know what you are doing, and your Linux already has **Python 3.10** installed, and your Python can be called with the command **python3** (and Pip with **pip3**), you can
|
177 |
+
|
178 |
+
git clone https://github.com/lllyasviel/Fooocus.git
|
179 |
+
cd Fooocus
|
180 |
+
pip3 install -r requirements_versions.txt
|
181 |
+
|
182 |
+
See the above sections for model downloads. You can launch the software with:
|
183 |
+
|
184 |
+
python3 entry_with_update.py
|
185 |
+
|
186 |
+
Or, if you want to open a remote port, use
|
187 |
+
|
188 |
+
python3 entry_with_update.py --listen
|
189 |
+
|
190 |
+
Use `python3 entry_with_update.py --preset anime` or `python3 entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
|
191 |
+
|
192 |
+
### Linux (AMD GPUs)
|
193 |
+
|
194 |
+
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
|
195 |
+
|
196 |
+
Same as the above instructions, but you need to change torch to the AMD (ROCm) version:
|
197 |
+
|
198 |
+
pip uninstall torch torchvision torchaudio torchtext functorch xformers
|
199 |
+
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6
|
200 |
+
|
201 |
+
AMD is not intensively tested, however. The AMD support is in beta.
|
202 |
+
|
203 |
+
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
|
204 |
+
|
205 |
+
### Windows (AMD GPUs)
|
206 |
+
|
207 |
+
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
|
208 |
+
|
209 |
+
Same as the Windows instructions above. Download the software and edit the content of `run.bat` as:
|
210 |
+
|
211 |
+
.\python_embeded\python.exe -m pip uninstall torch torchvision torchaudio torchtext functorch xformers -y
|
212 |
+
.\python_embeded\python.exe -m pip install torch-directml
|
213 |
+
.\python_embeded\python.exe -s Fooocus\entry_with_update.py --directml
|
214 |
+
pause
|
215 |
+
|
216 |
+
Then run the `run.bat`.
|
217 |
+
|
218 |
+
AMD is not intensively tested, however. The AMD support is in beta.
|
219 |
+
|
220 |
+
For AMD, use `.\python_embeded\python.exe -s Fooocus\entry_with_update.py --directml --preset anime` or `.\python_embeded\python.exe -s Fooocus\entry_with_update.py --directml --preset realistic` for Fooocus Anime/Realistic Edition.
|
221 |
+
|
222 |
+
### Mac
|
223 |
+
|
224 |
+
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
|
225 |
+
|
226 |
+
Mac is not intensively tested. Below is an unofficial guideline for using Mac. You can discuss problems [here](https://github.com/lllyasviel/Fooocus/pull/129).
|
227 |
+
|
228 |
+
You can install Fooocus on Apple silicon Macs (M1 or M2) with macOS 'Catalina' or a newer version. Fooocus runs on Apple silicon computers via [PyTorch](https://pytorch.org/get-started/locally/) MPS device acceleration. Apple silicon computers do not come with a dedicated graphics card, resulting in significantly longer image processing times compared to computers with dedicated graphics cards.
|
229 |
+
|
230 |
+
1. Install the conda package manager and pytorch nightly. Read the [Accelerated PyTorch training on Mac](https://developer.apple.com/metal/pytorch/) Apple Developer guide for instructions. Make sure pytorch recognizes your MPS device.
|
231 |
+
1. Open the macOS Terminal app and clone this repository with `git clone https://github.com/lllyasviel/Fooocus.git`.
|
232 |
+
1. Change to the new Fooocus directory, `cd Fooocus`.
|
233 |
+
1. Create a new conda environment, `conda env create -f environment.yaml`.
|
234 |
+
1. Activate your new conda environment, `conda activate fooocus`.
|
235 |
+
1. Install the packages required by Fooocus, `pip install -r requirements_versions.txt`.
|
236 |
+
1. Launch Fooocus by running `python entry_with_update.py`. (Some Mac M2 users may need `python entry_with_update.py --disable-offload-from-vram` to speed up model loading/unloading.) The first time you run Fooocus, it will automatically download the Stable Diffusion SDXL models and will take a significant amount of time, depending on your internet connection.
|
237 |
+
|
238 |
+
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
|
239 |
+
|
240 |
+
### Download Previous Version
|
241 |
+
|
242 |
+
See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405).
|
243 |
+
|
244 |
+
## Minimal Requirement
|
245 |
+
|
246 |
+
Below is the minimal requirement for running Fooocus locally. If your device capability is lower than this spec, you may not be able to use Fooocus locally. (Please let us know, in any case, if your device capability is lower but Fooocus still works.)
|
247 |
+
|
248 |
+
| Operating System | GPU | Minimal GPU Memory | Minimal System Memory | [System Swap](troubleshoot.md) | Note |
|
249 |
+
|-------------------|------------------------------|------------------------------|---------------------------|--------------------------------|----------------------------------------------------------------------------|
|
250 |
+
| Windows/Linux | Nvidia RTX 4XXX | 4GB | 8GB | Required | fastest |
|
251 |
+
| Windows/Linux | Nvidia RTX 3XXX | 4GB | 8GB | Required | usually faster than RTX 2XXX |
|
252 |
+
| Windows/Linux | Nvidia RTX 2XXX | 4GB | 8GB | Required | usually faster than GTX 1XXX |
|
253 |
+
| Windows/Linux | Nvidia GTX 1XXX | 8GB (* 6GB uncertain) | 8GB | Required | only marginally faster than CPU |
|
254 |
+
| Windows/Linux | Nvidia GTX 9XX | 8GB | 8GB | Required | faster or slower than CPU |
|
255 |
+
| Windows/Linux | Nvidia GTX < 9XX | Not supported | / | / | / |
|
256 |
+
| Windows | AMD GPU | 8GB (updated 2023 Dec 30) | 8GB | Required | via DirectML (* ROCm is on hold), about 3x slower than Nvidia RTX 3XXX |
|
257 |
+
| Linux | AMD GPU | 8GB | 8GB | Required | via ROCm, about 1.5x slower than Nvidia RTX 3XXX |
|
258 |
+
| Mac | M1/M2 MPS | Shared | Shared | Shared | about 9x slower than Nvidia RTX 3XXX |
|
259 |
+
| Windows/Linux/Mac | only use CPU | 0GB | 32GB | Required | about 17x slower than Nvidia RTX 3XXX |
|
260 |
+
|
261 |
+
* AMD GPU ROCm (on hold): AMD is still working on supporting ROCm on Windows.
|
262 |
+
|
263 |
+
* Nvidia GTX 1XXX 6GB uncertain: Some people report 6GB success on GTX 10XX, but others report failure cases.
|
264 |
+
|
265 |
+
*Note that Fooocus is only for extremely high quality image generation. We will not support smaller models that would reduce the requirements at the cost of result quality.*
|
266 |
+
|
267 |
+
## Troubleshoot
|
268 |
+
|
269 |
+
See the common problems [here](troubleshoot.md).
|
270 |
+
|
271 |
+
## Default Models
|
272 |
+
<a name="models"></a>
|
273 |
+
|
274 |
+
Given different goals, the default models and configs of Fooocus are different:
|
275 |
+
|
276 |
+
| Task | Windows | Linux args | Main Model | Refiner | Config |
|
277 |
+
| --- | --- | --- | --- | --- | --- |
|
278 |
+
| General | run.bat | | [juggernautXL v6_RunDiffusion](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/modules/path.py) |
|
279 |
+
| Realistic | run_realistic.bat | --preset realistic | [realistic_stock_photo](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v10.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
|
280 |
+
| Anime | run_anime.bat | --preset anime | [bluepencil_v50](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/bluePencilXL_v050.safetensors) | [dreamsharper_v8](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/DreamShaper_8_pruned.safetensors) (SD1.5) | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
|
281 |
+
|
282 |
+
Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can also download the files manually in advance (or move them from somewhere else) if you have your own copies.
|
283 |
+
|
284 |
+
## List of "Hidden" Tricks
|
285 |
+
<a name="tech_list"></a>
|
286 |
+
|
287 |
+
The things below are already inside the software, and **users do not need to do anything about them**.
|
288 |
+
|
289 |
+
1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or LeonardoAI's Prompt Magic).
|
290 |
+
2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
|
291 |
+
3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrast during CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the ADM on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16: the iOS app [Drawing Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
|
292 |
+
4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
|
293 |
+
5. We modified the style templates a bit and added the "cinematic-default".
|
294 |
+
6. We tested the "sd_xl_offset_example-lora_1.0.safetensors" and it seems that when the lora weight is below 0.5, the results are always better than XL without lora.
|
295 |
+
7. The parameters of samplers are carefully tuned.
|
296 |
+
8. Because XL uses positional encoding for the generation resolution, images generated at several fixed resolutions look a bit better than those from arbitrary resolutions (because the positional encoding is not very good at handling integer resolutions that are unseen during training). This suggests that the resolutions in the UI may be hard-coded for best results.
|
297 |
+
9. Separated prompts for two different text encoders seem unnecessary. Separated prompts for the base model and refiner may work, but the effects are random, and we refrain from implementing this.
|
298 |
+
10. The DPM family seems well-suited for XL since XL sometimes generates overly smooth texture, but the DPM family sometimes generates overly dense detail in texture. Their joint effect looks neutral and appealing to human perception.
|
299 |
+
11. A carefully designed system for balancing multiple styles as well as prompt expansion.
|
300 |
+
12. Using automatic1111's method to normalize prompt emphasis. This significantly improves results when users directly copy prompts from Civitai (a simplified sketch of the idea is shown after this list).
|
301 |
+
13. The joint swap system of the refiner now also supports img2img and upscale in a seamless way.
|
302 |
+
14. CFG Scale and TSNR correction (tuned for SDXL) when CFG is bigger than 10.
|
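To make trick 12 concrete, below is a simplified, hedged sketch of the idea behind A1111-style emphasis normalization: token weights from `(text:1.3)` spans scale the text-encoder output, and the result is rescaled so its mean matches the unweighted conditioning. This is an illustration only, not Fooocus's actual implementation; the function names and the toy regex are ours.

```python
import re
import torch

def parse_weighted_segments(prompt: str):
    # Toy parser: only handles flat "(text:1.5)" spans; everything else gets weight 1.0.
    pattern = re.compile(r"\(([^:()]+):([0-9.]+)\)")
    segments, cursor = [], 0
    for m in pattern.finditer(prompt):
        if m.start() > cursor:
            segments.append((prompt[cursor:m.start()], 1.0))
        segments.append((m.group(1), float(m.group(2))))
        cursor = m.end()
    if cursor < len(prompt):
        segments.append((prompt[cursor:], 1.0))
    return segments

def apply_weights_a1111_style(token_embeddings: torch.Tensor, token_weights: torch.Tensor):
    # token_embeddings: [tokens, dim], token_weights: [tokens].
    original_mean = token_embeddings.mean()
    weighted = token_embeddings * token_weights.unsqueeze(-1)
    # Rescale so the overall magnitude matches the unweighted conditioning;
    # this normalization is what keeps heavily weighted Civitai prompts from blowing up.
    return weighted * (original_mean / weighted.mean())

print(parse_weighted_segments("I am (happy:1.5) today"))
```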
303 |
+
|
304 |
+
## Customization
|
305 |
+
|
306 |
+
After the first time you run Fooocus, a config file will be generated at `Fooocus\config.txt`. This file can be edited to change the model path or default parameters.
|
307 |
+
|
308 |
+
For example, an edited `Fooocus\config.txt` (this file will be generated after the first launch) may look like this:
|
309 |
+
|
310 |
+
```json
|
311 |
+
{
|
312 |
+
"path_checkpoints": "D:\\Fooocus\\models\\checkpoints",
|
313 |
+
"path_loras": "D:\\Fooocus\\models\\loras",
|
314 |
+
"path_embeddings": "D:\\Fooocus\\models\\embeddings",
|
315 |
+
"path_vae_approx": "D:\\Fooocus\\models\\vae_approx",
|
316 |
+
"path_upscale_models": "D:\\Fooocus\\models\\upscale_models",
|
317 |
+
"path_inpaint": "D:\\Fooocus\\models\\inpaint",
|
318 |
+
"path_controlnet": "D:\\Fooocus\\models\\controlnet",
|
319 |
+
"path_clip_vision": "D:\\Fooocus\\models\\clip_vision",
|
320 |
+
"path_fooocus_expansion": "D:\\Fooocus\\models\\prompt_expansion\\fooocus_expansion",
|
321 |
+
"path_outputs": "D:\\Fooocus\\outputs",
|
322 |
+
"default_model": "realisticStockPhoto_v10.safetensors",
|
323 |
+
"default_refiner": "",
|
324 |
+
"default_loras": [["lora_filename_1.safetensors", 0.5], ["lora_filename_2.safetensors", 0.5]],
|
325 |
+
"default_cfg_scale": 3.0,
|
326 |
+
"default_sampler": "dpmpp_2m",
|
327 |
+
"default_scheduler": "karras",
|
328 |
+
"default_negative_prompt": "low quality",
|
329 |
+
"default_positive_prompt": "",
|
330 |
+
"default_styles": [
|
331 |
+
"Fooocus V2",
|
332 |
+
"Fooocus Photograph",
|
333 |
+
"Fooocus Negative"
|
334 |
+
]
|
335 |
+
}
|
336 |
+
```
|
337 |
+
|
338 |
+
Many other keys, formats, and examples are in `Fooocus\config_modification_tutorial.txt` (this file will be generated after the first launch).
|
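If you edit `config.txt` by hand, it can help to confirm the file is still valid JSON before launching. A minimal sketch (the path is an example; point it at your own `Fooocus\config.txt`):

```python
import json

# Quick sanity check for a hand-edited config file.
with open("config.txt", "r", encoding="utf-8") as f:
    config = json.load(f)  # raises json.JSONDecodeError if the file is malformed

print(sorted(config.keys()))  # e.g. ['default_cfg_scale', 'default_model', 'path_checkpoints', ...]
```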
339 |
+
|
340 |
+
Think twice before you really change the config. If you find yourself breaking things, just delete `Fooocus\config.txt` and Fooocus will go back to the defaults.
|
341 |
+
|
342 |
+
A safer way is just to try "run_anime.bat" or "run_realistic.bat" - they should already be good enough for different tasks.
|
343 |
+
|
344 |
+
~Note that `user_path_config.txt` is deprecated and will be removed soon.~ (Edit: it is already removed.)
|
345 |
+
|
346 |
+
### All CMD Flags
|
347 |
+
|
348 |
+
```
|
349 |
+
entry_with_update.py [-h] [--listen [IP]] [--port PORT]
|
350 |
+
[--disable-header-check [ORIGIN]]
|
351 |
+
[--web-upload-size WEB_UPLOAD_SIZE]
|
352 |
+
[--external-working-path PATH [PATH ...]]
|
353 |
+
[--output-path OUTPUT_PATH] [--temp-path TEMP_PATH]
|
354 |
+
[--cache-path CACHE_PATH] [--in-browser]
|
355 |
+
[--disable-in-browser] [--gpu-device-id DEVICE_ID]
|
356 |
+
[--async-cuda-allocation | --disable-async-cuda-allocation]
|
357 |
+
[--disable-attention-upcast] [--all-in-fp32 | --all-in-fp16]
|
358 |
+
[--unet-in-bf16 | --unet-in-fp16 | --unet-in-fp8-e4m3fn | --unet-in-fp8-e5m2]
|
359 |
+
[--vae-in-fp16 | --vae-in-fp32 | --vae-in-bf16]
|
360 |
+
[--clip-in-fp8-e4m3fn | --clip-in-fp8-e5m2 | --clip-in-fp16 | --clip-in-fp32]
|
361 |
+
[--directml [DIRECTML_DEVICE]] [--disable-ipex-hijack]
|
362 |
+
[--preview-option [none,auto,fast,taesd]]
|
363 |
+
[--attention-split | --attention-quad | --attention-pytorch]
|
364 |
+
[--disable-xformers]
|
365 |
+
[--always-gpu | --always-high-vram | --always-normal-vram |
|
366 |
+
--always-low-vram | --always-no-vram | --always-cpu]
|
367 |
+
[--always-offload-from-vram] [--disable-server-log]
|
368 |
+
[--debug-mode] [--is-windows-embedded-python]
|
369 |
+
[--disable-server-info] [--share] [--preset PRESET]
|
370 |
+
[--language LANGUAGE] [--disable-offload-from-vram]
|
371 |
+
[--theme THEME] [--disable-image-log]
|
372 |
+
```
|
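For example, to serve the UI on a remote port with a fixed port number and the dark theme, and without writing image logs, you can combine flags like `python entry_with_update.py --listen --port 8888 --theme dark --disable-image-log` (the specific values here are illustrative).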
373 |
+
|
374 |
+
## Advanced Features
|
375 |
+
|
376 |
+
[Click here to browse the advanced features.](https://github.com/lllyasviel/Fooocus/discussions/117)
|
377 |
+
|
378 |
+
Fooocus also has many community forks, just like SD-WebUI's [vladmandic/automatic](https://github.com/vladmandic/automatic) and [anapnoe/stable-diffusion-webui-ux](https://github.com/anapnoe/stable-diffusion-webui-ux), for enthusiastic users who want to try them!
|
379 |
+
|
380 |
+
| Fooocus' forks |
|
381 |
+
| - |
|
382 |
+
| [fenneishi/Fooocus-Control](https://github.com/fenneishi/Fooocus-Control) </br>[runew0lf/RuinedFooocus](https://github.com/runew0lf/RuinedFooocus) </br> [MoonRide303/Fooocus-MRE](https://github.com/MoonRide303/Fooocus-MRE) </br> [metercai/SimpleSDXL](https://github.com/metercai/SimpleSDXL) </br> and so on ... |
|
383 |
+
|
384 |
+
See also [About Forking and Promotion of Forks](https://github.com/lllyasviel/Fooocus/discussions/699).
|
385 |
+
|
386 |
+
## Thanks
|
387 |
+
|
388 |
+
Special thanks to [twri](https://github.com/twri) and [3Diva](https://github.com/3Diva) and [Marc K3nt3L](https://github.com/K3nt3L) for creating additional SDXL styles available in Fooocus. Thanks [daswer123](https://github.com/daswer123) for contributing the Canvas Zoom!
|
389 |
+
|
390 |
+
## Update Log
|
391 |
+
|
392 |
+
The log is [here](update_log.md).
|
393 |
+
|
394 |
+
## Localization/Translation/I18N
|
395 |
+
|
396 |
+
**We need your help!** Please help translate Fooocus into international languages.
|
397 |
+
|
398 |
+
You can put json files in the `language` folder to translate the user interface.
|
399 |
+
|
400 |
+
For example, below is the content of `Fooocus/language/example.json`:
|
401 |
+
|
402 |
+
```json
|
403 |
+
{
|
404 |
+
"Generate": "生成",
|
405 |
+
"Input Image": "入力画像",
|
406 |
+
"Advanced": "고급",
|
407 |
+
"SAI 3D Model": "SAI 3D Modèle"
|
408 |
+
}
|
409 |
+
```
|
410 |
+
|
411 |
+
If you add `--language example` arg, Fooocus will read `Fooocus/language/example.json` to translate the UI.
|
412 |
+
|
413 |
+
For example, you can edit the ending line of Windows `run.bat` as
|
414 |
+
|
415 |
+
.\python_embeded\python.exe -s Fooocus\entry_with_update.py --language example
|
416 |
+
|
417 |
+
Or `run_anime.bat` as
|
418 |
+
|
419 |
+
.\python_embeded\python.exe -s Fooocus\entry_with_update.py --language example --preset anime
|
420 |
+
|
421 |
+
Or `run_realistic.bat` as
|
422 |
+
|
423 |
+
.\python_embeded\python.exe -s Fooocus\entry_with_update.py --language example --preset realistic
|
424 |
+
|
425 |
+
For practical translation, you may create your own file like `Fooocus/language/jp.json` or `Fooocus/language/cn.json` and then use the flag `--language jp` or `--language cn`. Apparently, these files do not exist yet. **We need your help to create these files!**
|
426 |
+
|
427 |
+
Note that if no `--language` is given and at the same time `Fooocus/language/default.json` exists, Fooocus will always load `Fooocus/language/default.json` for translation. By default, the file `Fooocus/language/default.json` does not exist.
|
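For contributors, here is a minimal sketch of how such a translation table can be consumed. This is only an illustration of the json format above, not Fooocus's actual loader, and the helper names are ours:

```python
import json
import os

def load_translations(name: str, folder: str = "language") -> dict:
    # Assumed layout: language/<name>.json maps English UI strings to translated strings.
    path = os.path.join(folder, f"{name}.json")
    if not os.path.exists(path):
        return {}
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)

def translate(text: str, table: dict) -> str:
    # Fall back to the original English string when no translation exists.
    return table.get(text, text)

table = load_translations("example")
print(translate("Generate", table))        # "生成" if present in language/example.json
print(translate("Some new label", table))  # unchanged
```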
args_manager.py
ADDED
@@ -0,0 +1,40 @@
1 |
+
import ldm_patched.modules.args_parser as args_parser
|
2 |
+
|
3 |
+
|
4 |
+
args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
|
5 |
+
args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
|
6 |
+
|
7 |
+
args_parser.parser.add_argument("--language", type=str, default='default',
|
8 |
+
help="Translate UI using json files in [language] folder. "
|
9 |
+
"For example, [--language example] will use [language/example.json] for translation.")
|
10 |
+
|
11 |
+
# For example, https://github.com/lllyasviel/Fooocus/issues/849
|
12 |
+
args_parser.parser.add_argument("--disable-offload-from-vram", action="store_true",
|
13 |
+
help="Force loading models to vram when the unload can be avoided. "
|
14 |
+
"Some Mac users may need this.")
|
15 |
+
|
16 |
+
args_parser.parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
|
17 |
+
args_parser.parser.add_argument("--disable-image-log", action='store_true',
|
18 |
+
help="Prevent writing images and logs to hard drive.")
|
19 |
+
|
20 |
+
args_parser.parser.add_argument("--disable-analytics", action='store_true',
|
21 |
+
help="Disables analytics for Gradio", default=False)
|
22 |
+
|
23 |
+
args_parser.parser.set_defaults(
|
24 |
+
disable_cuda_malloc=True,
|
25 |
+
in_browser=True,
|
26 |
+
port=None
|
27 |
+
)
|
28 |
+
|
29 |
+
args_parser.args = args_parser.parser.parse_args()
|
30 |
+
|
31 |
+
# (Disable by default because of issues like https://github.com/lllyasviel/Fooocus/issues/724)
|
32 |
+
args_parser.args.always_offload_from_vram = not args_parser.args.disable_offload_from_vram
|
33 |
+
|
34 |
+
if args_parser.args.disable_analytics:
|
35 |
+
import os
|
36 |
+
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
|
37 |
+
if args_parser.args.disable_in_browser:
|
38 |
+
args_parser.args.in_browser = False
|
39 |
+
|
40 |
+
args = args_parser.args
|
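For reference, other modules then consume the parsed flags simply by importing `args` from this module. A minimal illustration (the consuming module itself is hypothetical):

```python
# hypothetical consumer module
from args_manager import args

if args.preset:
    print(f"Launching with preset: {args.preset}")
if args.disable_image_log:
    print("Image/log writing to disk is disabled.")
```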
auth-example.json
ADDED
@@ -0,0 +1,6 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"user": "sitting-duck-1",
|
4 |
+
"pass": "very-bad-publicly-known-password-change-it"
|
5 |
+
}
|
6 |
+
]
|
build_launcher.py
ADDED
@@ -0,0 +1,26 @@
1 |
+
import os
|
2 |
+
|
3 |
+
win32_root = os.path.dirname(os.path.dirname(__file__))
|
4 |
+
python_embeded_path = os.path.join(win32_root, 'python_embeded')
|
5 |
+
|
6 |
+
is_win32_standalone_build = os.path.exists(python_embeded_path) and os.path.isdir(python_embeded_path)
|
7 |
+
|
8 |
+
win32_cmd = '''
|
9 |
+
.\python_embeded\python.exe -s Fooocus\entry_with_update.py {cmds} %*
|
10 |
+
pause
|
11 |
+
'''
|
12 |
+
|
13 |
+
|
14 |
+
def build_launcher():
|
15 |
+
if not is_win32_standalone_build:
|
16 |
+
return
|
17 |
+
|
18 |
+
presets = [None, 'anime', 'realistic']
|
19 |
+
|
20 |
+
for preset in presets:
|
21 |
+
win32_cmd_preset = win32_cmd.replace('{cmds}', '' if preset is None else f'--preset {preset}')
|
22 |
+
bat_path = os.path.join(win32_root, 'run.bat' if preset is None else f'run_{preset}.bat')
|
23 |
+
if not os.path.exists(bat_path):
|
24 |
+
with open(bat_path, "w", encoding="utf-8") as f:
|
25 |
+
f.write(win32_cmd_preset)
|
26 |
+
return
|
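For clarity, with the template above the generated `run_anime.bat` (written only if it does not already exist) contains:

.\python_embeded\python.exe -s Fooocus\entry_with_update.py --preset anime %*
pause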
css/style.css
ADDED
@@ -0,0 +1,220 @@
1 |
+
/* based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/v1.6.0/style.css */
|
2 |
+
|
3 |
+
#context-menu{
|
4 |
+
z-index:9999;
|
5 |
+
position:absolute;
|
6 |
+
display:block;
|
7 |
+
padding:0px 0;
|
8 |
+
border:2px solid #a55000;
|
9 |
+
border-radius:8px;
|
10 |
+
box-shadow:1px 1px 2px #CE6400;
|
11 |
+
width: 200px;
|
12 |
+
}
|
13 |
+
|
14 |
+
.context-menu-items{
|
15 |
+
list-style: none;
|
16 |
+
margin: 0;
|
17 |
+
padding: 0;
|
18 |
+
}
|
19 |
+
|
20 |
+
.context-menu-items a{
|
21 |
+
display:block;
|
22 |
+
padding:5px;
|
23 |
+
cursor:pointer;
|
24 |
+
}
|
25 |
+
|
26 |
+
.context-menu-items a:hover{
|
27 |
+
background: #a55000;
|
28 |
+
}
|
29 |
+
|
30 |
+
.canvas-tooltip-info {
|
31 |
+
position: absolute;
|
32 |
+
top: 28px;
|
33 |
+
left: 2px;
|
34 |
+
cursor: help;
|
35 |
+
background-color: rgba(0, 0, 0, 0.3);
|
36 |
+
width: 20px;
|
37 |
+
height: 20px;
|
38 |
+
border-radius: 50%;
|
39 |
+
display: flex;
|
40 |
+
align-items: center;
|
41 |
+
justify-content: center;
|
42 |
+
flex-direction: column;
|
43 |
+
|
44 |
+
z-index: 100;
|
45 |
+
}
|
46 |
+
|
47 |
+
.canvas-tooltip-info::after {
|
48 |
+
content: '';
|
49 |
+
display: block;
|
50 |
+
width: 2px;
|
51 |
+
height: 7px;
|
52 |
+
background-color: white;
|
53 |
+
margin-top: 2px;
|
54 |
+
}
|
55 |
+
|
56 |
+
.canvas-tooltip-info::before {
|
57 |
+
content: '';
|
58 |
+
display: block;
|
59 |
+
width: 2px;
|
60 |
+
height: 2px;
|
61 |
+
background-color: white;
|
62 |
+
}
|
63 |
+
|
64 |
+
.canvas-tooltip-content {
|
65 |
+
display: none;
|
66 |
+
background-color: #f9f9f9;
|
67 |
+
color: #333;
|
68 |
+
border: 1px solid #ddd;
|
69 |
+
padding: 15px;
|
70 |
+
position: absolute;
|
71 |
+
top: 40px;
|
72 |
+
left: 10px;
|
73 |
+
width: 250px;
|
74 |
+
font-size: 16px;
|
75 |
+
opacity: 0;
|
76 |
+
border-radius: 8px;
|
77 |
+
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
|
78 |
+
|
79 |
+
z-index: 100;
|
80 |
+
}
|
81 |
+
|
82 |
+
.canvas-tooltip:hover .canvas-tooltip-content {
|
83 |
+
display: block;
|
84 |
+
animation: fadeIn 0.5s;
|
85 |
+
opacity: 1;
|
86 |
+
}
|
87 |
+
|
88 |
+
@keyframes fadeIn {
|
89 |
+
from {opacity: 0;}
|
90 |
+
to {opacity: 1;}
|
91 |
+
}
|
92 |
+
|
93 |
+
.styler {
|
94 |
+
overflow:inherit !important;
|
95 |
+
}
|
96 |
+
|
97 |
+
.gradio-container{
|
98 |
+
overflow: visible;
|
99 |
+
}
|
100 |
+
|
101 |
+
/* fullpage image viewer */
|
102 |
+
|
103 |
+
#lightboxModal{
|
104 |
+
display: none;
|
105 |
+
position: fixed;
|
106 |
+
z-index: 1001;
|
107 |
+
left: 0;
|
108 |
+
top: 0;
|
109 |
+
width: 100%;
|
110 |
+
height: 100%;
|
111 |
+
overflow: auto;
|
112 |
+
background-color: rgba(20, 20, 20, 0.95);
|
113 |
+
user-select: none;
|
114 |
+
-webkit-user-select: none;
|
115 |
+
flex-direction: column;
|
116 |
+
}
|
117 |
+
|
118 |
+
.modalControls {
|
119 |
+
display: flex;
|
120 |
+
position: absolute;
|
121 |
+
right: 0px;
|
122 |
+
left: 0px;
|
123 |
+
gap: 1em;
|
124 |
+
padding: 1em;
|
125 |
+
background-color:rgba(0,0,0,0);
|
126 |
+
z-index: 1;
|
127 |
+
transition: 0.2s ease background-color;
|
128 |
+
}
|
129 |
+
.modalControls:hover {
|
130 |
+
background-color:rgba(0,0,0,0.9);
|
131 |
+
}
|
132 |
+
.modalClose {
|
133 |
+
margin-left: auto;
|
134 |
+
}
|
135 |
+
.modalControls span{
|
136 |
+
color: white;
|
137 |
+
text-shadow: 0px 0px 0.25em black;
|
138 |
+
font-size: 35px;
|
139 |
+
font-weight: bold;
|
140 |
+
cursor: pointer;
|
141 |
+
width: 1em;
|
142 |
+
}
|
143 |
+
|
144 |
+
.modalControls span:hover, .modalControls span:focus{
|
145 |
+
color: #999;
|
146 |
+
text-decoration: none;
|
147 |
+
}
|
148 |
+
|
149 |
+
#lightboxModal > img {
|
150 |
+
display: block;
|
151 |
+
margin: auto;
|
152 |
+
width: auto;
|
153 |
+
}
|
154 |
+
|
155 |
+
#lightboxModal > img.modalImageFullscreen{
|
156 |
+
object-fit: contain;
|
157 |
+
height: 100%;
|
158 |
+
width: 100%;
|
159 |
+
min-height: 0;
|
160 |
+
}
|
161 |
+
|
162 |
+
.modalPrev,
|
163 |
+
.modalNext {
|
164 |
+
cursor: pointer;
|
165 |
+
position: absolute;
|
166 |
+
top: 50%;
|
167 |
+
width: auto;
|
168 |
+
padding: 16px;
|
169 |
+
margin-top: -50px;
|
170 |
+
color: white;
|
171 |
+
font-weight: bold;
|
172 |
+
font-size: 20px;
|
173 |
+
transition: 0.6s ease;
|
174 |
+
border-radius: 0 3px 3px 0;
|
175 |
+
user-select: none;
|
176 |
+
-webkit-user-select: none;
|
177 |
+
}
|
178 |
+
|
179 |
+
.modalNext {
|
180 |
+
right: 0;
|
181 |
+
border-radius: 3px 0 0 3px;
|
182 |
+
}
|
183 |
+
|
184 |
+
.modalPrev:hover,
|
185 |
+
.modalNext:hover {
|
186 |
+
background-color: rgba(0, 0, 0, 0.8);
|
187 |
+
}
|
188 |
+
|
189 |
+
#imageARPreview {
|
190 |
+
position: absolute;
|
191 |
+
top: 0px;
|
192 |
+
left: 0px;
|
193 |
+
border: 2px solid red;
|
194 |
+
background: rgba(255, 0, 0, 0.3);
|
195 |
+
z-index: 900;
|
196 |
+
pointer-events: none;
|
197 |
+
display: none;
|
198 |
+
}
|
199 |
+
|
200 |
+
#stylePreviewOverlay {
|
201 |
+
opacity: 0;
|
202 |
+
pointer-events: none;
|
203 |
+
width: 128px;
|
204 |
+
height: 128px;
|
205 |
+
position: fixed;
|
206 |
+
top: 0px;
|
207 |
+
left: 0px;
|
208 |
+
border: solid 1px lightgrey;
|
209 |
+
transform: translate(-140px, 20px);
|
210 |
+
background-size: cover;
|
211 |
+
background-position: center;
|
212 |
+
background-color: rgba(0, 0, 0, 0.3);
|
213 |
+
border-radius: 5px;
|
214 |
+
z-index: 100;
|
215 |
+
transition: transform 0.1s ease, opacity 0.3s ease;
|
216 |
+
}
|
217 |
+
|
218 |
+
#stylePreviewOverlay.lower-half {
|
219 |
+
transform: translate(-140px, -140px);
|
220 |
+
}
|
entry_with_update.py
ADDED
@@ -0,0 +1,46 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
|
4 |
+
|
5 |
+
root = os.path.dirname(os.path.abspath(__file__))
|
6 |
+
sys.path.append(root)
|
7 |
+
os.chdir(root)
|
8 |
+
|
9 |
+
|
10 |
+
try:
|
11 |
+
import pygit2
|
12 |
+
pygit2.option(pygit2.GIT_OPT_SET_OWNER_VALIDATION, 0)
|
13 |
+
|
14 |
+
repo = pygit2.Repository(os.path.abspath(os.path.dirname(__file__)))
|
15 |
+
|
16 |
+
branch_name = repo.head.shorthand
|
17 |
+
|
18 |
+
remote_name = 'origin'
|
19 |
+
remote = repo.remotes[remote_name]
|
20 |
+
|
21 |
+
remote.fetch()
|
22 |
+
|
23 |
+
local_branch_ref = f'refs/heads/{branch_name}'
|
24 |
+
local_branch = repo.lookup_reference(local_branch_ref)
|
25 |
+
|
26 |
+
remote_reference = f'refs/remotes/{remote_name}/{branch_name}'
|
27 |
+
remote_commit = repo.revparse_single(remote_reference)
|
28 |
+
|
29 |
+
merge_result, _ = repo.merge_analysis(remote_commit.id)
|
30 |
+
|
31 |
+
if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
|
32 |
+
print("Already up-to-date")
|
33 |
+
elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
|
34 |
+
local_branch.set_target(remote_commit.id)
|
35 |
+
repo.head.set_target(remote_commit.id)
|
36 |
+
repo.checkout_tree(repo.get(remote_commit.id))
|
37 |
+
repo.reset(local_branch.target, pygit2.GIT_RESET_HARD)
|
38 |
+
print("Fast-forward merge")
|
39 |
+
elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
|
40 |
+
print("Update failed - Did you modify any file?")
|
41 |
+
except Exception as e:
|
42 |
+
print('Update failed.')
|
43 |
+
print(str(e))
|
44 |
+
|
45 |
+
print('Update succeeded.')
|
46 |
+
from launch import *
|
environment.yaml
ADDED
@@ -0,0 +1,7 @@
1 |
+
name: fooocus
|
2 |
+
channels:
|
3 |
+
- defaults
|
4 |
+
dependencies:
|
5 |
+
- python=3.10
|
6 |
+
- pip=23.0
|
7 |
+
- packaging
|
experiments_expansion.py
ADDED
@@ -0,0 +1,8 @@
1 |
+
from modules.expansion import FooocusExpansion
|
2 |
+
|
3 |
+
expansion = FooocusExpansion()
|
4 |
+
|
5 |
+
text = 'a handsome man'
|
6 |
+
|
7 |
+
for i in range(64):
|
8 |
+
print(expansion(text, seed=i))
|
experiments_face.py
ADDED
@@ -0,0 +1,7 @@
1 |
+
import cv2
|
2 |
+
import extras.face_crop as cropper
|
3 |
+
|
4 |
+
|
5 |
+
img = cv2.imread('lena.png')
|
6 |
+
result = cropper.crop_image(img)
|
7 |
+
cv2.imwrite('lena_result.png', result)
|
experiments_interrogate.py
ADDED
@@ -0,0 +1,8 @@
1 |
+
import cv2
|
2 |
+
from extras.interrogate import default_interrogator as default_interrogator_photo
|
3 |
+
from extras.wd14tagger import default_interrogator as default_interrogator_anime
|
4 |
+
|
5 |
+
img = cv2.imread('./test_imgs/red_box.jpg')[:, :, ::-1].copy()
|
6 |
+
print(default_interrogator_photo(img))
|
7 |
+
img = cv2.imread('./test_imgs/miku.jpg')[:, :, ::-1].copy()
|
8 |
+
print(default_interrogator_anime(img))
|
extras/BLIP/configs/bert_config.json
ADDED
@@ -0,0 +1,21 @@
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"BertModel"
|
4 |
+
],
|
5 |
+
"attention_probs_dropout_prob": 0.1,
|
6 |
+
"hidden_act": "gelu",
|
7 |
+
"hidden_dropout_prob": 0.1,
|
8 |
+
"hidden_size": 768,
|
9 |
+
"initializer_range": 0.02,
|
10 |
+
"intermediate_size": 3072,
|
11 |
+
"layer_norm_eps": 1e-12,
|
12 |
+
"max_position_embeddings": 512,
|
13 |
+
"model_type": "bert",
|
14 |
+
"num_attention_heads": 12,
|
15 |
+
"num_hidden_layers": 12,
|
16 |
+
"pad_token_id": 0,
|
17 |
+
"type_vocab_size": 2,
|
18 |
+
"vocab_size": 30522,
|
19 |
+
"encoder_width": 768,
|
20 |
+
"add_cross_attention": true
|
21 |
+
}
|
extras/BLIP/configs/caption_coco.yaml
ADDED
@@ -0,0 +1,33 @@
1 |
+
image_root: '/export/share/datasets/vision/coco/images/'
|
2 |
+
ann_root: 'annotation'
|
3 |
+
coco_gt_root: 'annotation/coco_gt'
|
4 |
+
|
5 |
+
# set pretrained as a file path or an url
|
6 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
|
7 |
+
|
8 |
+
# size of vit model; base or large
|
9 |
+
vit: 'base'
|
10 |
+
vit_grad_ckpt: False
|
11 |
+
vit_ckpt_layer: 0
|
12 |
+
batch_size: 32
|
13 |
+
init_lr: 1e-5
|
14 |
+
|
15 |
+
# vit: 'large'
|
16 |
+
# vit_grad_ckpt: True
|
17 |
+
# vit_ckpt_layer: 5
|
18 |
+
# batch_size: 16
|
19 |
+
# init_lr: 2e-6
|
20 |
+
|
21 |
+
image_size: 384
|
22 |
+
|
23 |
+
# generation configs
|
24 |
+
max_length: 20
|
25 |
+
min_length: 5
|
26 |
+
num_beams: 3
|
27 |
+
prompt: 'a picture of '
|
28 |
+
|
29 |
+
# optimizer
|
30 |
+
weight_decay: 0.05
|
31 |
+
min_lr: 0
|
32 |
+
max_epoch: 5
|
33 |
+
|
extras/BLIP/configs/med_config.json
ADDED
@@ -0,0 +1,21 @@
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"BertModel"
|
4 |
+
],
|
5 |
+
"attention_probs_dropout_prob": 0.1,
|
6 |
+
"hidden_act": "gelu",
|
7 |
+
"hidden_dropout_prob": 0.1,
|
8 |
+
"hidden_size": 768,
|
9 |
+
"initializer_range": 0.02,
|
10 |
+
"intermediate_size": 3072,
|
11 |
+
"layer_norm_eps": 1e-12,
|
12 |
+
"max_position_embeddings": 512,
|
13 |
+
"model_type": "bert",
|
14 |
+
"num_attention_heads": 12,
|
15 |
+
"num_hidden_layers": 12,
|
16 |
+
"pad_token_id": 0,
|
17 |
+
"type_vocab_size": 2,
|
18 |
+
"vocab_size": 30524,
|
19 |
+
"encoder_width": 768,
|
20 |
+
"add_cross_attention": true
|
21 |
+
}
|
extras/BLIP/configs/nlvr.yaml
ADDED
@@ -0,0 +1,21 @@
1 |
+
image_root: '/export/share/datasets/vision/NLVR2/'
|
2 |
+
ann_root: 'annotation'
|
3 |
+
|
4 |
+
# set pretrained as a file path or an url
|
5 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_nlvr.pth'
|
6 |
+
|
7 |
+
#size of vit model; base or large
|
8 |
+
vit: 'base'
|
9 |
+
batch_size_train: 16
|
10 |
+
batch_size_test: 64
|
11 |
+
vit_grad_ckpt: False
|
12 |
+
vit_ckpt_layer: 0
|
13 |
+
max_epoch: 15
|
14 |
+
|
15 |
+
image_size: 384
|
16 |
+
|
17 |
+
# optimizer
|
18 |
+
weight_decay: 0.05
|
19 |
+
init_lr: 3e-5
|
20 |
+
min_lr: 0
|
21 |
+
|
extras/BLIP/configs/nocaps.yaml
ADDED
@@ -0,0 +1,15 @@
1 |
+
image_root: '/export/share/datasets/vision/nocaps/'
|
2 |
+
ann_root: 'annotation'
|
3 |
+
|
4 |
+
# set pretrained as a file path or an url
|
5 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
|
6 |
+
|
7 |
+
vit: 'base'
|
8 |
+
batch_size: 32
|
9 |
+
|
10 |
+
image_size: 384
|
11 |
+
|
12 |
+
max_length: 20
|
13 |
+
min_length: 5
|
14 |
+
num_beams: 3
|
15 |
+
prompt: 'a picture of '
|
extras/BLIP/configs/pretrain.yaml
ADDED
@@ -0,0 +1,27 @@
1 |
+
train_file: ['/export/share/junnan-li/VL_pretrain/annotation/coco_karpathy_train.json',
|
2 |
+
'/export/share/junnan-li/VL_pretrain/annotation/vg_caption.json',
|
3 |
+
]
|
4 |
+
laion_path: ''
|
5 |
+
|
6 |
+
# size of vit model; base or large
|
7 |
+
vit: 'base'
|
8 |
+
vit_grad_ckpt: False
|
9 |
+
vit_ckpt_layer: 0
|
10 |
+
|
11 |
+
image_size: 224
|
12 |
+
batch_size: 75
|
13 |
+
|
14 |
+
queue_size: 57600
|
15 |
+
alpha: 0.4
|
16 |
+
|
17 |
+
# optimizer
|
18 |
+
weight_decay: 0.05
|
19 |
+
init_lr: 3e-4
|
20 |
+
min_lr: 1e-6
|
21 |
+
warmup_lr: 1e-6
|
22 |
+
lr_decay_rate: 0.9
|
23 |
+
max_epoch: 20
|
24 |
+
warmup_steps: 3000
|
25 |
+
|
26 |
+
|
27 |
+
|
extras/BLIP/configs/retrieval_coco.yaml
ADDED
@@ -0,0 +1,34 @@
1 |
+
image_root: '/export/share/datasets/vision/coco/images/'
|
2 |
+
ann_root: 'annotation'
|
3 |
+
dataset: 'coco'
|
4 |
+
|
5 |
+
# set pretrained as a file path or an url
|
6 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
|
7 |
+
|
8 |
+
# size of vit model; base or large
|
9 |
+
|
10 |
+
vit: 'base'
|
11 |
+
batch_size_train: 32
|
12 |
+
batch_size_test: 64
|
13 |
+
vit_grad_ckpt: True
|
14 |
+
vit_ckpt_layer: 4
|
15 |
+
init_lr: 1e-5
|
16 |
+
|
17 |
+
# vit: 'large'
|
18 |
+
# batch_size_train: 16
|
19 |
+
# batch_size_test: 32
|
20 |
+
# vit_grad_ckpt: True
|
21 |
+
# vit_ckpt_layer: 12
|
22 |
+
# init_lr: 5e-6
|
23 |
+
|
24 |
+
image_size: 384
|
25 |
+
queue_size: 57600
|
26 |
+
alpha: 0.4
|
27 |
+
k_test: 256
|
28 |
+
negative_all_rank: True
|
29 |
+
|
30 |
+
# optimizer
|
31 |
+
weight_decay: 0.05
|
32 |
+
min_lr: 0
|
33 |
+
max_epoch: 6
|
34 |
+
|
extras/BLIP/configs/retrieval_flickr.yaml
ADDED
@@ -0,0 +1,34 @@
1 |
+
image_root: '/export/share/datasets/vision/flickr30k/'
|
2 |
+
ann_root: 'annotation'
|
3 |
+
dataset: 'flickr'
|
4 |
+
|
5 |
+
# set pretrained as a file path or an url
|
6 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_flickr.pth'
|
7 |
+
|
8 |
+
# size of vit model; base or large
|
9 |
+
|
10 |
+
vit: 'base'
|
11 |
+
batch_size_train: 32
|
12 |
+
batch_size_test: 64
|
13 |
+
vit_grad_ckpt: True
|
14 |
+
vit_ckpt_layer: 4
|
15 |
+
init_lr: 1e-5
|
16 |
+
|
17 |
+
# vit: 'large'
|
18 |
+
# batch_size_train: 16
|
19 |
+
# batch_size_test: 32
|
20 |
+
# vit_grad_ckpt: True
|
21 |
+
# vit_ckpt_layer: 10
|
22 |
+
# init_lr: 5e-6
|
23 |
+
|
24 |
+
image_size: 384
|
25 |
+
queue_size: 57600
|
26 |
+
alpha: 0.4
|
27 |
+
k_test: 128
|
28 |
+
negative_all_rank: False
|
29 |
+
|
30 |
+
# optimizer
|
31 |
+
weight_decay: 0.05
|
32 |
+
min_lr: 0
|
33 |
+
max_epoch: 6
|
34 |
+
|
extras/BLIP/configs/retrieval_msrvtt.yaml
ADDED
@@ -0,0 +1,12 @@
1 |
+
video_root: '/export/share/dongxuli/data/msrvtt_retrieval/videos'
|
2 |
+
ann_root: 'annotation'
|
3 |
+
|
4 |
+
# set pretrained as a file path or an url
|
5 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
|
6 |
+
|
7 |
+
# size of vit model; base or large
|
8 |
+
vit: 'base'
|
9 |
+
batch_size: 64
|
10 |
+
k_test: 128
|
11 |
+
image_size: 384
|
12 |
+
num_frm_test: 8
|
extras/BLIP/configs/vqa.yaml
ADDED
@@ -0,0 +1,25 @@
1 |
+
vqa_root: '/export/share/datasets/vision/VQA/Images/mscoco/' #followed by train2014/
|
2 |
+
vg_root: '/export/share/datasets/vision/visual-genome/' #followed by image/
|
3 |
+
train_files: ['vqa_train','vqa_val','vg_qa']
|
4 |
+
ann_root: 'annotation'
|
5 |
+
|
6 |
+
# set pretrained as a file path or an url
|
7 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
|
8 |
+
|
9 |
+
# size of vit model; base or large
|
10 |
+
vit: 'base'
|
11 |
+
batch_size_train: 16
|
12 |
+
batch_size_test: 32
|
13 |
+
vit_grad_ckpt: False
|
14 |
+
vit_ckpt_layer: 0
|
15 |
+
init_lr: 2e-5
|
16 |
+
|
17 |
+
image_size: 480
|
18 |
+
|
19 |
+
k_test: 128
|
20 |
+
inference: 'rank'
|
21 |
+
|
22 |
+
# optimizer
|
23 |
+
weight_decay: 0.05
|
24 |
+
min_lr: 0
|
25 |
+
max_epoch: 10
|
extras/BLIP/models/bert_tokenizer/config.json
ADDED
@@ -0,0 +1,23 @@
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"BertForMaskedLM"
|
4 |
+
],
|
5 |
+
"attention_probs_dropout_prob": 0.1,
|
6 |
+
"gradient_checkpointing": false,
|
7 |
+
"hidden_act": "gelu",
|
8 |
+
"hidden_dropout_prob": 0.1,
|
9 |
+
"hidden_size": 768,
|
10 |
+
"initializer_range": 0.02,
|
11 |
+
"intermediate_size": 3072,
|
12 |
+
"layer_norm_eps": 1e-12,
|
13 |
+
"max_position_embeddings": 512,
|
14 |
+
"model_type": "bert",
|
15 |
+
"num_attention_heads": 12,
|
16 |
+
"num_hidden_layers": 12,
|
17 |
+
"pad_token_id": 0,
|
18 |
+
"position_embedding_type": "absolute",
|
19 |
+
"transformers_version": "4.6.0.dev0",
|
20 |
+
"type_vocab_size": 2,
|
21 |
+
"use_cache": true,
|
22 |
+
"vocab_size": 30522
|
23 |
+
}
|
extras/BLIP/models/bert_tokenizer/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
extras/BLIP/models/bert_tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,3 @@
1 |
+
{
|
2 |
+
"do_lower_case": true
|
3 |
+
}
|
extras/BLIP/models/bert_tokenizer/vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
extras/BLIP/models/blip.py
ADDED
@@ -0,0 +1,239 @@
1 |
+
'''
|
2 |
+
* Copyright (c) 2022, salesforce.com, inc.
|
3 |
+
* All rights reserved.
|
4 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
5 |
+
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
6 |
+
* By Junnan Li
|
7 |
+
'''
|
8 |
+
import warnings
|
9 |
+
warnings.filterwarnings("ignore")
|
10 |
+
|
11 |
+
from extras.BLIP.models.vit import VisionTransformer, interpolate_pos_embed
|
12 |
+
from extras.BLIP.models.med import BertConfig, BertModel, BertLMHeadModel
|
13 |
+
from transformers import BertTokenizer
|
14 |
+
|
15 |
+
import torch
|
16 |
+
from torch import nn
|
17 |
+
import torch.nn.functional as F
|
18 |
+
|
19 |
+
import os
|
20 |
+
from urllib.parse import urlparse
|
21 |
+
from timm.models.hub import download_cached_file
|
22 |
+
|
23 |
+
class BLIP_Base(nn.Module):
|
24 |
+
def __init__(self,
|
25 |
+
med_config = 'configs/med_config.json',
|
26 |
+
image_size = 224,
|
27 |
+
vit = 'base',
|
28 |
+
vit_grad_ckpt = False,
|
29 |
+
vit_ckpt_layer = 0,
|
30 |
+
):
|
31 |
+
"""
|
32 |
+
Args:
|
33 |
+
med_config (str): path for the mixture of encoder-decoder model's configuration file
|
34 |
+
image_size (int): input image size
|
35 |
+
vit (str): model size of vision transformer
|
36 |
+
"""
|
37 |
+
super().__init__()
|
38 |
+
|
39 |
+
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
|
40 |
+
self.tokenizer = init_tokenizer()
|
41 |
+
med_config = BertConfig.from_json_file(med_config)
|
42 |
+
med_config.encoder_width = vision_width
|
43 |
+
self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)
|
44 |
+
|
45 |
+
|
46 |
+
def forward(self, image, caption, mode):
|
47 |
+
|
48 |
+
assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
|
49 |
+
text = self.tokenizer(caption, return_tensors="pt").to(image.device)
|
50 |
+
|
51 |
+
if mode=='image':
|
52 |
+
# return image features
|
53 |
+
image_embeds = self.visual_encoder(image)
|
54 |
+
return image_embeds
|
55 |
+
|
56 |
+
elif mode=='text':
|
57 |
+
# return text features
|
58 |
+
text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
|
59 |
+
return_dict = True, mode = 'text')
|
60 |
+
return text_output.last_hidden_state
|
61 |
+
|
62 |
+
elif mode=='multimodal':
|
63 |
+
# return multimodel features
|
64 |
+
image_embeds = self.visual_encoder(image)
|
65 |
+
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
|
66 |
+
|
67 |
+
text.input_ids[:,0] = self.tokenizer.enc_token_id
|
68 |
+
output = self.text_encoder(text.input_ids,
|
69 |
+
attention_mask = text.attention_mask,
|
70 |
+
encoder_hidden_states = image_embeds,
|
71 |
+
encoder_attention_mask = image_atts,
|
72 |
+
return_dict = True,
|
73 |
+
)
|
74 |
+
return output.last_hidden_state
|
75 |
+
|
76 |
+
|
77 |
+
|
78 |
+
class BLIP_Decoder(nn.Module):
|
79 |
+
def __init__(self,
|
80 |
+
med_config = 'configs/med_config.json',
|
81 |
+
image_size = 384,
|
82 |
+
vit = 'base',
|
83 |
+
vit_grad_ckpt = False,
|
84 |
+
vit_ckpt_layer = 0,
|
85 |
+
prompt = 'a picture of ',
|
86 |
+
):
|
87 |
+
"""
|
88 |
+
Args:
|
89 |
+
med_config (str): path for the mixture of encoder-decoder model's configuration file
|
90 |
+
image_size (int): input image size
|
91 |
+
vit (str): model size of vision transformer
|
92 |
+
"""
|
93 |
+
super().__init__()
|
94 |
+
|
95 |
+
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
|
96 |
+
self.tokenizer = init_tokenizer()
|
97 |
+
med_config = BertConfig.from_json_file(med_config)
|
98 |
+
med_config.encoder_width = vision_width
|
99 |
+
self.text_decoder = BertLMHeadModel(config=med_config)
|
100 |
+
|
101 |
+
self.prompt = prompt
|
102 |
+
self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1
|
103 |
+
|
104 |
+
|
105 |
+
def forward(self, image, caption):
|
106 |
+
|
107 |
+
image_embeds = self.visual_encoder(image)
|
108 |
+
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
|
109 |
+
|
110 |
+
text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)
|
111 |
+
|
112 |
+
text.input_ids[:,0] = self.tokenizer.bos_token_id
|
113 |
+
|
114 |
+
decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
|
115 |
+
decoder_targets[:,:self.prompt_length] = -100
|
116 |
+
|
117 |
+
decoder_output = self.text_decoder(text.input_ids,
|
118 |
+
attention_mask = text.attention_mask,
|
119 |
+
encoder_hidden_states = image_embeds,
|
120 |
+
encoder_attention_mask = image_atts,
|
121 |
+
labels = decoder_targets,
|
122 |
+
return_dict = True,
|
123 |
+
)
|
124 |
+
loss_lm = decoder_output.loss
|
125 |
+
|
126 |
+
return loss_lm
|
127 |
+
|
128 |
+
def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
|
129 |
+
image_embeds = self.visual_encoder(image)
|
130 |
+
|
131 |
+
if not sample:
|
132 |
+
image_embeds = image_embeds.repeat_interleave(num_beams,dim=0)
|
133 |
+
|
134 |
+
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
|
135 |
+
model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts}
|
136 |
+
|
137 |
+
prompt = [self.prompt] * image.size(0)
|
138 |
+
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
|
139 |
+
input_ids[:,0] = self.tokenizer.bos_token_id
|
140 |
+
input_ids = input_ids[:, :-1]
|
141 |
+
|
142 |
+
if sample:
|
143 |
+
#nucleus sampling
|
144 |
+
outputs = self.text_decoder.generate(input_ids=input_ids,
|
145 |
+
max_length=max_length,
|
146 |
+
                                                 min_length=min_length,
                                                 do_sample=True,
                                                 top_p=top_p,
                                                 num_return_sequences=1,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=1.1,
                                                 **model_kwargs)
        else:
            # beam search
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 num_beams=num_beams,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=repetition_penalty,
                                                 **model_kwargs)

        captions = []
        for output in outputs:
            caption = self.tokenizer.decode(output, skip_special_tokens=True)
            captions.append(caption[len(self.prompt):])
        return captions


def blip_decoder(pretrained='', **kwargs):
    model = BLIP_Decoder(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert(len(msg.missing_keys) == 0)
    return model


def blip_feature_extractor(pretrained='', **kwargs):
    model = BLIP_Base(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert(len(msg.missing_keys) == 0)
    return model


def init_tokenizer():
    tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bert_tokenizer")
    tokenizer = BertTokenizer.from_pretrained(tokenizer_path)
    tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
    return tokenizer


def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):

    assert vit in ['base', 'large'], "vit parameter must be base or large"
    if vit == 'base':
        vision_width = 768
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
                                           num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
                                           drop_path_rate=0 or drop_path_rate
                                           )
    elif vit == 'large':
        vision_width = 1024
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
                                           num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
                                           drop_path_rate=0.1 or drop_path_rate
                                           )
    return visual_encoder, vision_width


def is_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def load_checkpoint(model, url_or_filename):
    if is_url(url_or_filename):
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')

    state_dict = checkpoint['model']

    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
    if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
        state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
                                                                         model.visual_encoder_m)
    for key in model.state_dict().keys():
        if key in state_dict.keys():
            if state_dict[key].shape != model.state_dict()[key].shape:
                del state_dict[key]

    msg = model.load_state_dict(state_dict, strict=False)
    print('load checkpoint from %s' % url_or_filename)
    return model, msg

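For orientation, a minimal captioning sketch built on the blip_decoder factory above; the dummy tensor, config path, and generation settings are illustrative assumptions, not part of this diff.

# Hypothetical usage sketch (not part of the repository).
import torch
from extras.BLIP.models.blip import blip_decoder

# In practice a BLIP captioning checkpoint path or URL would be passed via `pretrained=`;
# with the default pretrained='' the weights are random and the caption is meaningless.
model = blip_decoder(med_config='extras/BLIP/configs/med_config.json',
                     image_size=384, vit='base').eval()

image = torch.zeros(1, 3, 384, 384)  # stands in for a preprocessed, normalized image batch

with torch.no_grad():
    captions = model.generate(image, sample=False, num_beams=3, max_length=20, min_length=5)
print(captions[0])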
extras/BLIP/models/blip_itm.py
ADDED
@@ -0,0 +1,76 @@
from extras.BLIP.models.med import BertConfig, BertModel
from transformers import BertTokenizer

import torch
from torch import nn
import torch.nn.functional as F

from extras.BLIP.models.blip import create_vit, init_tokenizer, load_checkpoint

class BLIP_ITM(nn.Module):
    def __init__(self,
                 med_config = 'configs/med_config.json',
                 image_size = 384,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 embed_dim = 256,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

        text_width = self.text_encoder.config.hidden_size

        self.vision_proj = nn.Linear(vision_width, embed_dim)
        self.text_proj = nn.Linear(text_width, embed_dim)

        self.itm_head = nn.Linear(text_width, 2)


    def forward(self, image, caption, match_head='itm'):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=35,
                              return_tensors="pt").to(image.device)


        if match_head == 'itm':
            output = self.text_encoder(text.input_ids,
                                       attention_mask = text.attention_mask,
                                       encoder_hidden_states = image_embeds,
                                       encoder_attention_mask = image_atts,
                                       return_dict = True,
                                       )
            itm_output = self.itm_head(output.last_hidden_state[:, 0, :])
            return itm_output

        elif match_head == 'itc':
            text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
                                            return_dict = True, mode = 'text')
            image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
            text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1)

            sim = image_feat @ text_feat.t()
            return sim


def blip_itm(pretrained='', **kwargs):
    model = BLIP_ITM(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert(len(msg.missing_keys) == 0)
    return model

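A hedged usage sketch for BLIP_ITM above, scoring one image/caption pair with both the ITM head and the ITC (cosine-similarity) head; the dummy tensor and config path are illustrative assumptions.

# Hypothetical usage sketch (not part of the repository).
import torch
import torch.nn.functional as F
from extras.BLIP.models.blip_itm import blip_itm

# In practice a BLIP retrieval/ITM checkpoint would be passed via `pretrained=`.
model = blip_itm(med_config='extras/BLIP/configs/med_config.json',
                 image_size=384, vit='base').eval()

image = torch.zeros(1, 3, 384, 384)        # stands in for a preprocessed image batch
caption = ['a dog playing in the park']

with torch.no_grad():
    itm_logits = model(image, caption, match_head='itm')
    match_prob = F.softmax(itm_logits, dim=1)[:, 1]    # probability that image and text match
    itc_sim = model(image, caption, match_head='itc')  # cosine similarity of projected features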
extras/BLIP/models/blip_nlvr.py
ADDED
@@ -0,0 +1,105 @@
from extras.BLIP.models.med import BertConfig
from extras.BLIP.models.nlvr_encoder import BertModel
from extras.BLIP.models.vit import interpolate_pos_embed
from extras.BLIP.models.blip import create_vit, init_tokenizer, is_url

from timm.models.hub import download_cached_file

import torch
from torch import nn
import torch.nn.functional as F
from transformers import BertTokenizer
import numpy as np
import os


class BLIP_NLVR(nn.Module):
    def __init__(self,
                 med_config = 'configs/med_config.json',
                 image_size = 480,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, drop_path_rate=0.1)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

        self.cls_head = nn.Sequential(
            nn.Linear(self.text_encoder.config.hidden_size, self.text_encoder.config.hidden_size),
            nn.ReLU(),
            nn.Linear(self.text_encoder.config.hidden_size, 2)
        )

    def forward(self, image, text, targets, train=True):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        image0_embeds, image1_embeds = torch.split(image_embeds, targets.size(0))

        text = self.tokenizer(text, padding='longest', return_tensors="pt").to(image.device)
        text.input_ids[:, 0] = self.tokenizer.enc_token_id

        output = self.text_encoder(text.input_ids,
                                   attention_mask = text.attention_mask,
                                   encoder_hidden_states = [image0_embeds, image1_embeds],
                                   encoder_attention_mask = [image_atts[:image0_embeds.size(0)],
                                                             image_atts[image0_embeds.size(0):]],
                                   return_dict = True,
                                   )
        hidden_state = output.last_hidden_state[:, 0, :]
        prediction = self.cls_head(hidden_state)

        if train:
            loss = F.cross_entropy(prediction, targets)
            return loss
        else:
            return prediction

def blip_nlvr(pretrained='', **kwargs):
    model = BLIP_NLVR(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        print("missing keys:")
        print(msg.missing_keys)
    return model


def load_checkpoint(model, url_or_filename):
    if is_url(url_or_filename):
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')
    state_dict = checkpoint['model']

    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)

    for key in list(state_dict.keys()):
        if 'crossattention.self.' in key:
            new_key0 = key.replace('self', 'self0')
            new_key1 = key.replace('self', 'self1')
            state_dict[new_key0] = state_dict[key]
            state_dict[new_key1] = state_dict[key]
        elif 'crossattention.output.dense.' in key:
            new_key0 = key.replace('dense', 'dense0')
            new_key1 = key.replace('dense', 'dense1')
            state_dict[new_key0] = state_dict[key]
            state_dict[new_key1] = state_dict[key]

    msg = model.load_state_dict(state_dict, strict=False)
    print('load checkpoint from %s' % url_or_filename)
    return model, msg

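A hedged inference sketch for BLIP_NLVR above. NLVR2 pairs two images with each statement and the forward pass expects the two image sets concatenated along the batch dimension; every tensor and path below is an illustrative assumption.

# Hypothetical inference sketch (not part of the repository).
import torch
from extras.BLIP.models.blip_nlvr import blip_nlvr

model = blip_nlvr(med_config='extras/BLIP/configs/med_config.json',
                  image_size=480, vit='base').eval()

# Two statements, each paired with two images; the model splits the concatenated
# batch back into the two image sets using targets.size(0).
image0 = torch.zeros(2, 3, 480, 480)
image1 = torch.zeros(2, 3, 480, 480)
images = torch.cat([image0, image1], dim=0)
targets = torch.tensor([0, 1])             # dummy labels; at inference they only set the split size
statements = ['the left image contains a dog', 'both images show two cats']

with torch.no_grad():
    logits = model(images, statements, targets, train=False)   # shape (2, 2): false/true scores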
extras/BLIP/models/blip_pretrain.py
ADDED
@@ -0,0 +1,339 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''
from extras.BLIP.models.med import BertConfig, BertModel, BertLMHeadModel
from transformers import BertTokenizer
import transformers
transformers.logging.set_verbosity_error()

import torch
from torch import nn
import torch.nn.functional as F

from extras.BLIP.models.blip import create_vit, init_tokenizer, load_checkpoint

class BLIP_Pretrain(nn.Module):
    def __init__(self,
                 med_config = 'configs/bert_config.json',
                 image_size = 224,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 embed_dim = 256,
                 queue_size = 57600,
                 momentum = 0.995,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, 0)

        if vit == 'base':
            checkpoint = torch.hub.load_state_dict_from_url(
                url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
                map_location="cpu", check_hash=True)
            state_dict = checkpoint["model"]
            msg = self.visual_encoder.load_state_dict(state_dict, strict=False)
        elif vit == 'large':
            from timm.models.helpers import load_custom_pretrained
            from timm.models.vision_transformer import default_cfgs
            load_custom_pretrained(self.visual_encoder, default_cfgs['vit_large_patch16_224_in21k'])

        self.tokenizer = init_tokenizer()
        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = vision_width
        self.text_encoder = BertModel.from_pretrained('bert-base-uncased', config=encoder_config, add_pooling_layer=False)
        self.text_encoder.resize_token_embeddings(len(self.tokenizer))

        text_width = self.text_encoder.config.hidden_size

        self.vision_proj = nn.Linear(vision_width, embed_dim)
        self.text_proj = nn.Linear(text_width, embed_dim)

        self.itm_head = nn.Linear(text_width, 2)

        # create momentum encoders
        self.visual_encoder_m, vision_width = create_vit(vit, image_size)
        self.vision_proj_m = nn.Linear(vision_width, embed_dim)
        self.text_encoder_m = BertModel(config=encoder_config, add_pooling_layer=False)
        self.text_proj_m = nn.Linear(text_width, embed_dim)

        self.model_pairs = [[self.visual_encoder, self.visual_encoder_m],
                            [self.vision_proj, self.vision_proj_m],
                            [self.text_encoder, self.text_encoder_m],
                            [self.text_proj, self.text_proj_m],
                            ]
        self.copy_params()

        # create the queue
        self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

        self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
        self.text_queue = nn.functional.normalize(self.text_queue, dim=0)

        self.queue_size = queue_size
        self.momentum = momentum
        self.temp = nn.Parameter(0.07*torch.ones([]))

        # create the decoder
        decoder_config = BertConfig.from_json_file(med_config)
        decoder_config.encoder_width = vision_width
        self.text_decoder = BertLMHeadModel.from_pretrained('bert-base-uncased', config=decoder_config)
        self.text_decoder.resize_token_embeddings(len(self.tokenizer))
        tie_encoder_decoder_weights(self.text_encoder, self.text_decoder.bert, '', '/attention')


    def forward(self, image, caption, alpha):
        with torch.no_grad():
            self.temp.clamp_(0.001, 0.5)

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)

        text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=30,
                              return_tensors="pt").to(image.device)
        text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
                                        return_dict = True, mode = 'text')
        text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1)

        # get momentum features
        with torch.no_grad():
            self._momentum_update()
            image_embeds_m = self.visual_encoder_m(image)
            image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1)
            image_feat_all = torch.cat([image_feat_m.t(), self.image_queue.clone().detach()], dim=1)

            text_output_m = self.text_encoder_m(text.input_ids, attention_mask = text.attention_mask,
                                                return_dict = True, mode = 'text')
            text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:, 0, :]), dim=-1)
            text_feat_all = torch.cat([text_feat_m.t(), self.text_queue.clone().detach()], dim=1)

            sim_i2t_m = image_feat_m @ text_feat_all / self.temp
            sim_t2i_m = text_feat_m @ image_feat_all / self.temp

            sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
            sim_targets.fill_diagonal_(1)

            sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
            sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets

        sim_i2t = image_feat @ text_feat_all / self.temp
        sim_t2i = text_feat @ image_feat_all / self.temp

        loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_i2t_targets, dim=1).mean()
        loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_t2i_targets, dim=1).mean()

        loss_ita = (loss_i2t+loss_t2i)/2

        self._dequeue_and_enqueue(image_feat_m, text_feat_m)

        ###============== Image-text Matching ===================###
        encoder_input_ids = text.input_ids.clone()
        encoder_input_ids[:, 0] = self.tokenizer.enc_token_id

        # forward the positive image-text pair
        bs = image.size(0)
        output_pos = self.text_encoder(encoder_input_ids,
                                       attention_mask = text.attention_mask,
                                       encoder_hidden_states = image_embeds,
                                       encoder_attention_mask = image_atts,
                                       return_dict = True,
                                       )
        with torch.no_grad():
            weights_t2i = F.softmax(sim_t2i[:, :bs], dim=1)+1e-4
            weights_t2i.fill_diagonal_(0)
            weights_i2t = F.softmax(sim_i2t[:, :bs], dim=1)+1e-4
            weights_i2t.fill_diagonal_(0)

        # select a negative image for each text
        image_embeds_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_t2i[b], 1).item()
            image_embeds_neg.append(image_embeds[neg_idx])
        image_embeds_neg = torch.stack(image_embeds_neg, dim=0)

        # select a negative text for each image
        text_ids_neg = []
        text_atts_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_i2t[b], 1).item()
            text_ids_neg.append(encoder_input_ids[neg_idx])
            text_atts_neg.append(text.attention_mask[neg_idx])

        text_ids_neg = torch.stack(text_ids_neg, dim=0)
        text_atts_neg = torch.stack(text_atts_neg, dim=0)

        text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0)
        text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)

        image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
        image_atts_all = torch.cat([image_atts, image_atts], dim=0)

        output_neg = self.text_encoder(text_ids_all,
                                       attention_mask = text_atts_all,
                                       encoder_hidden_states = image_embeds_all,
                                       encoder_attention_mask = image_atts_all,
                                       return_dict = True,
                                       )

        vl_embeddings = torch.cat([output_pos.last_hidden_state[:, 0, :], output_neg.last_hidden_state[:, 0, :]], dim=0)
        vl_output = self.itm_head(vl_embeddings)

        itm_labels = torch.cat([torch.ones(bs, dtype=torch.long), torch.zeros(2*bs, dtype=torch.long)],
                               dim=0).to(image.device)
        loss_itm = F.cross_entropy(vl_output, itm_labels)

        ##================= LM ========================##
        decoder_input_ids = text.input_ids.clone()
        decoder_input_ids[:, 0] = self.tokenizer.bos_token_id
        decoder_targets = decoder_input_ids.masked_fill(decoder_input_ids == self.tokenizer.pad_token_id, -100)

        decoder_output = self.text_decoder(decoder_input_ids,
                                           attention_mask = text.attention_mask,
                                           encoder_hidden_states = image_embeds,
                                           encoder_attention_mask = image_atts,
                                           labels = decoder_targets,
                                           return_dict = True,
                                           )

        loss_lm = decoder_output.loss
        return loss_ita, loss_itm, loss_lm



    @torch.no_grad()
    def copy_params(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data.copy_(param.data)  # initialize
                param_m.requires_grad = False  # not update by gradient


    @torch.no_grad()
    def _momentum_update(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)


    @torch.no_grad()
    def _dequeue_and_enqueue(self, image_feat, text_feat):
        # gather keys before updating queue
        image_feats = concat_all_gather(image_feat)
        text_feats = concat_all_gather(text_feat)

        batch_size = image_feats.shape[0]

        ptr = int(self.queue_ptr)
        assert self.queue_size % batch_size == 0  # for simplicity

        # replace the keys at ptr (dequeue and enqueue)
        self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
        self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
        ptr = (ptr + batch_size) % self.queue_size  # move pointer

        self.queue_ptr[0] = ptr


def blip_pretrain(**kwargs):
    model = BLIP_Pretrain(**kwargs)
    return model


@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    tensors_gather = [torch.ones_like(tensor)
                      for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)

    output = torch.cat(tensors_gather, dim=0)
    return output


from typing import List
def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, skip_key: str):
    uninitialized_encoder_weights: List[str] = []
    if decoder.__class__ != encoder.__class__:
        print(
            f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
        )

    def tie_encoder_to_decoder_recursively(
        decoder_pointer: nn.Module,
        encoder_pointer: nn.Module,
        module_name: str,
        uninitialized_encoder_weights: List[str],
        skip_key: str,
        depth=0,
    ):
        assert isinstance(decoder_pointer, nn.Module) and isinstance(
            encoder_pointer, nn.Module
        ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
        if hasattr(decoder_pointer, "weight") and skip_key not in module_name:
            assert hasattr(encoder_pointer, "weight")
            encoder_pointer.weight = decoder_pointer.weight
            if hasattr(decoder_pointer, "bias"):
                assert hasattr(encoder_pointer, "bias")
                encoder_pointer.bias = decoder_pointer.bias
            print(module_name+' is tied')
            return

        encoder_modules = encoder_pointer._modules
        decoder_modules = decoder_pointer._modules
        if len(decoder_modules) > 0:
            assert (
                len(encoder_modules) > 0
            ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

            all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
            encoder_layer_pos = 0
            for name, module in decoder_modules.items():
                if name.isdigit():
                    encoder_name = str(int(name) + encoder_layer_pos)
                    decoder_name = name
                    if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                        encoder_modules
                    ) != len(decoder_modules):
                        # this can happen if the name corresponds to the position in a list module list of layers
                        # in this case the decoder has added a cross-attention that the encoder does not have
                        # thus skip this step and subtract one layer pos from encoder
                        encoder_layer_pos -= 1
                        continue
                elif name not in encoder_modules:
                    continue
                elif depth > 500:
                    raise ValueError(
                        "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                    )
                else:
                    decoder_name = encoder_name = name
                tie_encoder_to_decoder_recursively(
                    decoder_modules[decoder_name],
                    encoder_modules[encoder_name],
                    module_name + "/" + name,
                    uninitialized_encoder_weights,
                    skip_key,
                    depth=depth + 1,
                )
                all_encoder_weights.remove(module_name + "/" + encoder_name)

            uninitialized_encoder_weights += list(all_encoder_weights)

    # tie weights recursively
    tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights, skip_key)
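A hedged sketch of one pre-training step with BLIP_Pretrain above, showing how the three returned losses are combined. Batch contents, hyperparameters, and the config path are assumptions; construction downloads DeiT and bert-base-uncased weights, and the queue update calls torch.distributed, so a process group must already be initialized.

# Hypothetical single-step sketch (not part of the repository); assumes an initialized
# torch.distributed process group and network access for the pretrained backbones.
import torch
from extras.BLIP.models.blip_pretrain import blip_pretrain

model = blip_pretrain(med_config='extras/BLIP/configs/bert_config.json',
                      image_size=224, vit='base', queue_size=57600)
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4, weight_decay=0.05)

images = torch.zeros(8, 3, 224, 224)   # dummy batch; queue_size must divide evenly by the global batch size
captions = ['a photo of a cat'] * 8
alpha = 0.4                            # weight of the soft (momentum) targets

loss_ita, loss_itm, loss_lm = model(images, captions, alpha)
loss = loss_ita + loss_itm + loss_lm
loss.backward()
optimizer.step()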
extras/BLIP/models/blip_retrieval.py
ADDED
@@ -0,0 +1,319 @@
from extras.BLIP.models.med import BertConfig, BertModel
from transformers import BertTokenizer

import torch
from torch import nn
import torch.nn.functional as F

from extras.BLIP.models.blip import create_vit, init_tokenizer, load_checkpoint

class BLIP_Retrieval(nn.Module):
    def __init__(self,
                 med_config = 'configs/med_config.json',
                 image_size = 384,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 embed_dim = 256,
                 queue_size = 57600,
                 momentum = 0.995,
                 negative_all_rank = False,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

        text_width = self.text_encoder.config.hidden_size

        self.vision_proj = nn.Linear(vision_width, embed_dim)
        self.text_proj = nn.Linear(text_width, embed_dim)

        self.itm_head = nn.Linear(text_width, 2)

        # create momentum encoders
        self.visual_encoder_m, vision_width = create_vit(vit, image_size)
        self.vision_proj_m = nn.Linear(vision_width, embed_dim)
        self.text_encoder_m = BertModel(config=med_config, add_pooling_layer=False)
        self.text_proj_m = nn.Linear(text_width, embed_dim)

        self.model_pairs = [[self.visual_encoder, self.visual_encoder_m],
                            [self.vision_proj, self.vision_proj_m],
                            [self.text_encoder, self.text_encoder_m],
                            [self.text_proj, self.text_proj_m],
                            ]
        self.copy_params()

        # create the queue
        self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("idx_queue", torch.full((1, queue_size), -100))
        self.register_buffer("ptr_queue", torch.zeros(1, dtype=torch.long))

        self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
        self.text_queue = nn.functional.normalize(self.text_queue, dim=0)

        self.queue_size = queue_size
        self.momentum = momentum
        self.temp = nn.Parameter(0.07*torch.ones([]))

        self.negative_all_rank = negative_all_rank


    def forward(self, image, caption, alpha, idx):
        with torch.no_grad():
            self.temp.clamp_(0.001, 0.5)

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)

        text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=35,
                              return_tensors="pt").to(image.device)

        text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
                                        return_dict = True, mode = 'text')
        text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1)

        ###============== Image-text Contrastive Learning ===================###
        idx = idx.view(-1, 1)
        idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()], dim=1)
        pos_idx = torch.eq(idx, idx_all).float()
        sim_targets = pos_idx / pos_idx.sum(1, keepdim=True)

        # get momentum features
        with torch.no_grad():
            self._momentum_update()
            image_embeds_m = self.visual_encoder_m(image)
            image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1)
            image_feat_m_all = torch.cat([image_feat_m.t(), self.image_queue.clone().detach()], dim=1)

            text_output_m = self.text_encoder_m(text.input_ids, attention_mask = text.attention_mask,
                                                return_dict = True, mode = 'text')
            text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:, 0, :]), dim=-1)
            text_feat_m_all = torch.cat([text_feat_m.t(), self.text_queue.clone().detach()], dim=1)

            sim_i2t_m = image_feat_m @ text_feat_m_all / self.temp
            sim_t2i_m = text_feat_m @ image_feat_m_all / self.temp

            sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
            sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets

        sim_i2t = image_feat @ text_feat_m_all / self.temp
        sim_t2i = text_feat @ image_feat_m_all / self.temp

        loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_i2t_targets, dim=1).mean()
        loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_t2i_targets, dim=1).mean()

        loss_ita = (loss_i2t+loss_t2i)/2

        idxs = concat_all_gather(idx)
        self._dequeue_and_enqueue(image_feat_m, text_feat_m, idxs)

        ###============== Image-text Matching ===================###
        encoder_input_ids = text.input_ids.clone()
        encoder_input_ids[:, 0] = self.tokenizer.enc_token_id

        # forward the positive image-text pair
        bs = image.size(0)
        output_pos = self.text_encoder(encoder_input_ids,
                                       attention_mask = text.attention_mask,
                                       encoder_hidden_states = image_embeds,
                                       encoder_attention_mask = image_atts,
                                       return_dict = True,
                                       )


        if self.negative_all_rank:
            # compute sample similarity
            with torch.no_grad():
                mask = torch.eq(idx, idxs.t())

                image_feat_world = concat_all_gather(image_feat)
                text_feat_world = concat_all_gather(text_feat)

                sim_i2t = image_feat @ text_feat_world.t() / self.temp
                sim_t2i = text_feat @ image_feat_world.t() / self.temp

                weights_i2t = F.softmax(sim_i2t, dim=1)
                weights_i2t.masked_fill_(mask, 0)

                weights_t2i = F.softmax(sim_t2i, dim=1)
                weights_t2i.masked_fill_(mask, 0)

            image_embeds_world = all_gather_with_grad(image_embeds)

            # select a negative image (from all ranks) for each text
            image_embeds_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_t2i[b], 1).item()
                image_embeds_neg.append(image_embeds_world[neg_idx])
            image_embeds_neg = torch.stack(image_embeds_neg, dim=0)

            # select a negative text (from all ranks) for each image
            input_ids_world = concat_all_gather(encoder_input_ids)
            att_mask_world = concat_all_gather(text.attention_mask)

            text_ids_neg = []
            text_atts_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_i2t[b], 1).item()
                text_ids_neg.append(input_ids_world[neg_idx])
                text_atts_neg.append(att_mask_world[neg_idx])

        else:
            with torch.no_grad():
                mask = torch.eq(idx, idx.t())

                sim_i2t = image_feat @ text_feat.t() / self.temp
                sim_t2i = text_feat @ image_feat.t() / self.temp

                weights_i2t = F.softmax(sim_i2t, dim=1)
                weights_i2t.masked_fill_(mask, 0)

                weights_t2i = F.softmax(sim_t2i, dim=1)
                weights_t2i.masked_fill_(mask, 0)

            # select a negative image (from same rank) for each text
            image_embeds_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_t2i[b], 1).item()
                image_embeds_neg.append(image_embeds[neg_idx])
            image_embeds_neg = torch.stack(image_embeds_neg, dim=0)

            # select a negative text (from same rank) for each image
            text_ids_neg = []
            text_atts_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_i2t[b], 1).item()
                text_ids_neg.append(encoder_input_ids[neg_idx])
                text_atts_neg.append(text.attention_mask[neg_idx])

        text_ids_neg = torch.stack(text_ids_neg, dim=0)
        text_atts_neg = torch.stack(text_atts_neg, dim=0)

        text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0)
        text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)

        image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
        image_atts_all = torch.cat([image_atts, image_atts], dim=0)

        output_neg = self.text_encoder(text_ids_all,
                                       attention_mask = text_atts_all,
                                       encoder_hidden_states = image_embeds_all,
                                       encoder_attention_mask = image_atts_all,
                                       return_dict = True,
                                       )


        vl_embeddings = torch.cat([output_pos.last_hidden_state[:, 0, :], output_neg.last_hidden_state[:, 0, :]], dim=0)
        vl_output = self.itm_head(vl_embeddings)

        itm_labels = torch.cat([torch.ones(bs, dtype=torch.long), torch.zeros(2*bs, dtype=torch.long)],
                               dim=0).to(image.device)
        loss_itm = F.cross_entropy(vl_output, itm_labels)

        return loss_ita, loss_itm


    @torch.no_grad()
    def copy_params(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data.copy_(param.data)  # initialize
                param_m.requires_grad = False  # not update by gradient


    @torch.no_grad()
    def _momentum_update(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)


    @torch.no_grad()
    def _dequeue_and_enqueue(self, image_feat, text_feat, idxs):
        # gather keys before updating queue
        image_feats = concat_all_gather(image_feat)
        text_feats = concat_all_gather(text_feat)


        batch_size = image_feats.shape[0]

        ptr = int(self.ptr_queue)
        assert self.queue_size % batch_size == 0  # for simplicity

        # replace the keys at ptr (dequeue and enqueue)
        self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
        self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
        self.idx_queue[:, ptr:ptr + batch_size] = idxs.T
        ptr = (ptr + batch_size) % self.queue_size  # move pointer

        self.ptr_queue[0] = ptr


def blip_retrieval(pretrained='', **kwargs):
    model = BLIP_Retrieval(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        print("missing keys:")
        print(msg.missing_keys)
    return model


@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    tensors_gather = [torch.ones_like(tensor)
                      for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)

    output = torch.cat(tensors_gather, dim=0)
    return output


class GatherLayer(torch.autograd.Function):
    """
    Gather tensors from all workers with support for backward propagation:
    This implementation does not cut the gradients as torch.distributed.all_gather does.
    """

    @staticmethod
    def forward(ctx, x):
        output = [torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(output, x)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        all_gradients = torch.stack(grads)
        torch.distributed.all_reduce(all_gradients)
        return all_gradients[torch.distributed.get_rank()]


def all_gather_with_grad(tensors):
    """
    Performs all_gather operation on the provided tensors.
    Graph remains connected for backward grad computation.
    """
    # Queue the gathered tensors
    world_size = torch.distributed.get_world_size()
    # There is no need for reduction in the single-proc case
    if world_size == 1:
        return tensors

    tensor_all = GatherLayer.apply(tensors)

    return torch.cat(tensor_all, dim=0)
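A hedged sketch of one retrieval fine-tuning step with BLIP_Retrieval above. The idx tensor gives each image a unique id so that duplicate captions can be treated as positives; as in pre-training, the queue update assumes an initialized torch.distributed process group. All values below are illustrative assumptions.

# Hypothetical single-step sketch (not part of the repository); assumes an initialized
# torch.distributed process group, since the queue update gathers across ranks.
import torch
from extras.BLIP.models.blip_retrieval import blip_retrieval

model = blip_retrieval(med_config='extras/BLIP/configs/med_config.json',
                       image_size=384, vit='base', queue_size=57600)

images = torch.zeros(8, 3, 384, 384)
captions = ['a photo of a cat'] * 8
idx = torch.arange(8)                  # unique image ids; repeated ids mark extra positives

loss_ita, loss_itm = model(images, captions, alpha=0.4, idx=idx)
(loss_ita + loss_itm).backward()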
extras/BLIP/models/blip_vqa.py
ADDED
@@ -0,0 +1,186 @@
from extras.BLIP.models.med import BertConfig, BertModel, BertLMHeadModel
from extras.BLIP.models.blip import create_vit, init_tokenizer, load_checkpoint

import torch
from torch import nn
import torch.nn.functional as F
from transformers import BertTokenizer
import numpy as np

class BLIP_VQA(nn.Module):
    def __init__(self,
                 med_config = 'configs/med_config.json',
                 image_size = 480,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, drop_path_rate=0.1)
        self.tokenizer = init_tokenizer()

        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False)

        decoder_config = BertConfig.from_json_file(med_config)
        self.text_decoder = BertLMHeadModel(config=decoder_config)


    def forward(self, image, question, answer=None, n=None, weights=None, train=True, inference='rank', k_test=128):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        question = self.tokenizer(question, padding='longest', truncation=True, max_length=35,
                                  return_tensors="pt").to(image.device)
        question.input_ids[:, 0] = self.tokenizer.enc_token_id

        if train:
            '''
            n: number of answers for each question
            weights: weight for each answer
            '''
            answer = self.tokenizer(answer, padding='longest', return_tensors="pt").to(image.device)
            answer.input_ids[:, 0] = self.tokenizer.bos_token_id
            answer_targets = answer.input_ids.masked_fill(answer.input_ids == self.tokenizer.pad_token_id, -100)

            question_output = self.text_encoder(question.input_ids,
                                                attention_mask = question.attention_mask,
                                                encoder_hidden_states = image_embeds,
                                                encoder_attention_mask = image_atts,
                                                return_dict = True)

            question_states = []
            question_atts = []
            for b, n in enumerate(n):
                question_states += [question_output.last_hidden_state[b]]*n
                question_atts += [question.attention_mask[b]]*n
            question_states = torch.stack(question_states, 0)
            question_atts = torch.stack(question_atts, 0)

            answer_output = self.text_decoder(answer.input_ids,
                                              attention_mask = answer.attention_mask,
                                              encoder_hidden_states = question_states,
                                              encoder_attention_mask = question_atts,
                                              labels = answer_targets,
                                              return_dict = True,
                                              reduction = 'none',
                                              )

            loss = weights * answer_output.loss
            loss = loss.sum()/image.size(0)

            return loss


        else:
            question_output = self.text_encoder(question.input_ids,
                                                attention_mask = question.attention_mask,
                                                encoder_hidden_states = image_embeds,
                                                encoder_attention_mask = image_atts,
                                                return_dict = True)

            if inference == 'generate':
                num_beams = 3
                question_states = question_output.last_hidden_state.repeat_interleave(num_beams, dim=0)
                question_atts = torch.ones(question_states.size()[:-1], dtype=torch.long).to(question_states.device)
                model_kwargs = {"encoder_hidden_states": question_states, "encoder_attention_mask": question_atts}

                bos_ids = torch.full((image.size(0), 1), fill_value=self.tokenizer.bos_token_id, device=image.device)

                outputs = self.text_decoder.generate(input_ids=bos_ids,
                                                     max_length=10,
                                                     min_length=1,
                                                     num_beams=num_beams,
                                                     eos_token_id=self.tokenizer.sep_token_id,
                                                     pad_token_id=self.tokenizer.pad_token_id,
                                                     **model_kwargs)

                answers = []
                for output in outputs:
                    answer = self.tokenizer.decode(output, skip_special_tokens=True)
                    answers.append(answer)
                return answers

            elif inference == 'rank':
                max_ids = self.rank_answer(question_output.last_hidden_state, question.attention_mask,
                                           answer.input_ids, answer.attention_mask, k_test)
                return max_ids



    def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k):

        num_ques = question_states.size(0)
        start_ids = answer_ids[0, 0].repeat(num_ques, 1)  # bos token

        start_output = self.text_decoder(start_ids,
                                         encoder_hidden_states = question_states,
                                         encoder_attention_mask = question_atts,
                                         return_dict = True,
                                         reduction = 'none')
        logits = start_output.logits[:, 0, :]  # first token's logit

        # topk_probs: top-k probability
        # topk_ids: [num_question, k]
        answer_first_token = answer_ids[:, 1]
        prob_first_token = F.softmax(logits, dim=1).index_select(dim=1, index=answer_first_token)
        topk_probs, topk_ids = prob_first_token.topk(k, dim=1)

        # answer input: [num_question*k, answer_len]
        input_ids = []
        input_atts = []
        for b, topk_id in enumerate(topk_ids):
            input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
            input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
        input_ids = torch.cat(input_ids, dim=0)
        input_atts = torch.cat(input_atts, dim=0)

        targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)

        # repeat encoder's output for top-k answers
        question_states = tile(question_states, 0, k)
        question_atts = tile(question_atts, 0, k)

        output = self.text_decoder(input_ids,
                                   attention_mask = input_atts,
                                   encoder_hidden_states = question_states,
                                   encoder_attention_mask = question_atts,
                                   labels = targets_ids,
                                   return_dict = True,
                                   reduction = 'none')

        log_probs_sum = -output.loss
        log_probs_sum = log_probs_sum.view(num_ques, k)

        max_topk_ids = log_probs_sum.argmax(dim=1)
        max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids]

        return max_ids


def blip_vqa(pretrained='', **kwargs):
    model = BLIP_VQA(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        # assert(len(msg.missing_keys)==0)
    return model


def tile(x, dim, n_tile):
    init_dim = x.size(dim)
    repeat_idx = [1] * x.dim()
    repeat_idx[dim] = n_tile
    x = x.repeat(*(repeat_idx))
    order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))
    return torch.index_select(x, dim, order_index.to(x.device))

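A hedged VQA inference sketch using the blip_vqa factory above with inference='generate'; the dummy image tensor and config path are illustrative assumptions.

# Hypothetical inference sketch (not part of the repository).
import torch
from extras.BLIP.models.blip_vqa import blip_vqa

# In practice a BLIP VQA checkpoint would be passed via `pretrained=`;
# without it the weights are random and the answer is meaningless.
model = blip_vqa(med_config='extras/BLIP/configs/med_config.json',
                 image_size=480, vit='base').eval()

image = torch.zeros(1, 3, 480, 480)    # stands in for a preprocessed 480x480 image batch
with torch.no_grad():
    answers = model(image, ['what color is the sky?'], train=False, inference='generate')
print(answers[0])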
extras/BLIP/models/med.py
ADDED
@@ -0,0 +1,955 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
 * Based on huggingface code base
 * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
'''

import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F

from transformers.activations import ACT2FN
from transformers.file_utils import (
    ModelOutput,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig


logger = logging.get_logger(__name__)


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds

        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertSelfAttention(nn.Module):
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
|
186 |
+
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
|
187 |
+
|
188 |
+
if self.position_embedding_type == "relative_key":
|
189 |
+
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
190 |
+
attention_scores = attention_scores + relative_position_scores
|
191 |
+
elif self.position_embedding_type == "relative_key_query":
|
192 |
+
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
193 |
+
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
|
194 |
+
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
|
195 |
+
|
196 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
197 |
+
if attention_mask is not None:
|
198 |
+
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
|
199 |
+
attention_scores = attention_scores + attention_mask
|
200 |
+
|
201 |
+
# Normalize the attention scores to probabilities.
|
202 |
+
attention_probs = nn.Softmax(dim=-1)(attention_scores)
|
203 |
+
|
204 |
+
if is_cross_attention and self.save_attention:
|
205 |
+
self.save_attention_map(attention_probs)
|
206 |
+
attention_probs.register_hook(self.save_attn_gradients)
|
207 |
+
|
208 |
+
# This is actually dropping out entire tokens to attend to, which might
|
209 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
210 |
+
attention_probs_dropped = self.dropout(attention_probs)
|
211 |
+
|
212 |
+
# Mask heads if we want to
|
213 |
+
if head_mask is not None:
|
214 |
+
attention_probs_dropped = attention_probs_dropped * head_mask
|
215 |
+
|
216 |
+
context_layer = torch.matmul(attention_probs_dropped, value_layer)
|
217 |
+
|
218 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
219 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
220 |
+
context_layer = context_layer.view(*new_context_layer_shape)
|
221 |
+
|
222 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
223 |
+
|
224 |
+
outputs = outputs + (past_key_value,)
|
225 |
+
return outputs
|
226 |
+
|
227 |
+
|
228 |
+


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.layer_num = layer_num
        if self.config.add_cross_attention:
            self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        mode=None,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]

        if mode == 'multimodal':
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"

            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        mode='multimodal',
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    mode=mode,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    mode=mode,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class BertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To be used as a decoder, the model needs to be initialized with both the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)

        self.encoder = BertEncoder(config)

        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape

                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            embedding_output = encoder_embeds

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )


class BertLMHeadModel(BertPreTrainedModel):

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        return_logits=False,
        is_decoder=True,
        reduction='mean',
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
            mode=mode,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if reduction == 'none':
                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            "is_decoder": True,
        }

    def _reorder_cache(self, past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
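
Editor's note: the BertModel / BertLMHeadModel classes above are the text stack that the BLIP wrappers added elsewhere in this commit (extras/BLIP/models/blip*.py) presumably build on. The following is a minimal usage sketch, not part of the diff: it assumes the added package layout is importable as a module path, that extras/BLIP/configs/med_config.json carries the encoder_width and add_cross_attention fields the cross-attention branches above rely on, and it stands in for the ViT output with a random tensor.

# Hypothetical usage sketch for the decoder defined in med.py above (not from the source repo).
import torch
from transformers import BertTokenizer
from transformers.models.bert.configuration_bert import BertConfig
from extras.BLIP.models.med import BertLMHeadModel  # path as added in this commit; importability assumed

med_config = BertConfig.from_json_file("extras/BLIP/configs/med_config.json")  # assumed to set add_cross_attention
decoder = BertLMHeadModel(config=med_config)  # randomly initialized; BLIP normally loads pretrained weights

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
text = tokenizer(["a photo of"], return_tensors="pt")

# Placeholder for image features from the ViT: (batch, num_patches, encoder_width).
vision_feats = torch.randn(1, 197, med_config.encoder_width)
vision_atts = torch.ones(vision_feats.shape[:-1], dtype=torch.long)

# Cross-attend over the image features; labels are shifted internally for next-token loss.
out = decoder(
    input_ids=text.input_ids,
    attention_mask=text.attention_mask,
    encoder_hidden_states=vision_feats,
    encoder_attention_mask=vision_atts,
    labels=text.input_ids,
    mode="multimodal",
)
print(out.loss)
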
extras/BLIP/models/nlvr_encoder.py
ADDED
@@ -0,0 +1,843 @@
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F

from transformers.activations import ACT2FN
from transformers.file_utils import (
    ModelOutput,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig


logger = logging.get_logger(__name__)


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds

        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertSelfAttention(nn.Module):
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs


class BertSelfOutput(nn.Module):
    def __init__(self, config, twin=False, merge=False):
        super().__init__()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if twin:
            self.dense0 = nn.Linear(config.hidden_size, config.hidden_size)
            self.dense1 = nn.Linear(config.hidden_size, config.hidden_size)
        else:
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if merge:
            self.act = ACT2FN[config.hidden_act]
            self.merge_layer = nn.Linear(config.hidden_size * 2, config.hidden_size)
            self.merge = True
        else:
            self.merge = False

    def forward(self, hidden_states, input_tensor):
        if type(hidden_states) == list:
            hidden_states0 = self.dense0(hidden_states[0])
            hidden_states1 = self.dense1(hidden_states[1])
            if self.merge:
                # hidden_states = self.merge_layer(self.act(torch.cat([hidden_states0, hidden_states1], dim=-1)))
                hidden_states = self.merge_layer(torch.cat([hidden_states0, hidden_states1], dim=-1))
            else:
                hidden_states = (hidden_states0 + hidden_states1) / 2
        else:
            hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False, layer_num=-1):
        super().__init__()
        if is_cross_attention:
            self.self0 = BertSelfAttention(config, is_cross_attention)
            self.self1 = BertSelfAttention(config, is_cross_attention)
        else:
            self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config, twin=is_cross_attention, merge=(is_cross_attention and layer_num >= 6))
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        if type(encoder_hidden_states) == list:
            self_outputs0 = self.self0(
                hidden_states,
                attention_mask,
                head_mask,
                encoder_hidden_states[0],
                encoder_attention_mask[0],
                past_key_value,
                output_attentions,
            )
            self_outputs1 = self.self1(
                hidden_states,
                attention_mask,
                head_mask,
                encoder_hidden_states[1],
                encoder_attention_mask[1],
                past_key_value,
                output_attentions,
            )
            attention_output = self.output([self_outputs0[0], self_outputs1[0]], hidden_states)

            outputs = (attention_output,) + self_outputs0[1:]  # add attentions if we output them
        else:
            self_outputs = self.self(
                hidden_states,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
            )
            attention_output = self.output(self_outputs[0], hidden_states)
            outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class BertLayer(nn.Module):
|
357 |
+
def __init__(self, config, layer_num):
|
358 |
+
super().__init__()
|
359 |
+
self.config = config
|
360 |
+
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
361 |
+
self.seq_len_dim = 1
|
362 |
+
self.attention = BertAttention(config)
|
363 |
+
self.layer_num = layer_num
|
364 |
+
if self.config.add_cross_attention:
|
365 |
+
self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention, layer_num=layer_num)
|
366 |
+
self.intermediate = BertIntermediate(config)
|
367 |
+
self.output = BertOutput(config)
|
368 |
+
|
369 |
+
def forward(
|
370 |
+
self,
|
371 |
+
hidden_states,
|
372 |
+
attention_mask=None,
|
373 |
+
head_mask=None,
|
374 |
+
encoder_hidden_states=None,
|
375 |
+
encoder_attention_mask=None,
|
376 |
+
past_key_value=None,
|
377 |
+
output_attentions=False,
|
378 |
+
mode=None,
|
379 |
+
):
|
380 |
+
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
381 |
+
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
|
382 |
+
self_attention_outputs = self.attention(
|
383 |
+
hidden_states,
|
384 |
+
attention_mask,
|
385 |
+
head_mask,
|
386 |
+
output_attentions=output_attentions,
|
387 |
+
past_key_value=self_attn_past_key_value,
|
388 |
+
)
|
389 |
+
attention_output = self_attention_outputs[0]
|
390 |
+
|
391 |
+
outputs = self_attention_outputs[1:-1]
|
392 |
+
present_key_value = self_attention_outputs[-1]
|
393 |
+
|
394 |
+
if mode=='multimodal':
|
395 |
+
assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
|
396 |
+
cross_attention_outputs = self.crossattention(
|
397 |
+
attention_output,
|
398 |
+
attention_mask,
|
399 |
+
head_mask,
|
400 |
+
encoder_hidden_states,
|
401 |
+
encoder_attention_mask,
|
402 |
+
output_attentions=output_attentions,
|
403 |
+
)
|
404 |
+
attention_output = cross_attention_outputs[0]
|
405 |
+
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
|
406 |
+
layer_output = apply_chunking_to_forward(
|
407 |
+
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
|
408 |
+
)
|
409 |
+
outputs = (layer_output,) + outputs
|
410 |
+
|
411 |
+
outputs = outputs + (present_key_value,)
|
412 |
+
|
413 |
+
return outputs
|
414 |
+
|
415 |
+
def feed_forward_chunk(self, attention_output):
|
416 |
+
intermediate_output = self.intermediate(attention_output)
|
417 |
+
layer_output = self.output(intermediate_output, attention_output)
|
418 |
+
return layer_output
|
419 |
+
|
420 |
+
|
421 |
+
class BertEncoder(nn.Module):
|
422 |
+
def __init__(self, config):
|
423 |
+
super().__init__()
|
424 |
+
self.config = config
|
425 |
+
self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
|
426 |
+
self.gradient_checkpointing = False
|
427 |
+
|
428 |
+
def forward(
|
429 |
+
self,
|
430 |
+
hidden_states,
|
431 |
+
attention_mask=None,
|
432 |
+
head_mask=None,
|
433 |
+
encoder_hidden_states=None,
|
434 |
+
encoder_attention_mask=None,
|
435 |
+
past_key_values=None,
|
436 |
+
use_cache=None,
|
437 |
+
output_attentions=False,
|
438 |
+
output_hidden_states=False,
|
439 |
+
return_dict=True,
|
440 |
+
mode='multimodal',
|
441 |
+
):
|
442 |
+
all_hidden_states = () if output_hidden_states else None
|
443 |
+
all_self_attentions = () if output_attentions else None
|
444 |
+
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
|
445 |
+
|
446 |
+
next_decoder_cache = () if use_cache else None
|
447 |
+
|
448 |
+
for i in range(self.config.num_hidden_layers):
|
449 |
+
layer_module = self.layer[i]
|
450 |
+
if output_hidden_states:
|
451 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
452 |
+
|
453 |
+
layer_head_mask = head_mask[i] if head_mask is not None else None
|
454 |
+
past_key_value = past_key_values[i] if past_key_values is not None else None
|
455 |
+
|
456 |
+
if self.gradient_checkpointing and self.training:
|
457 |
+
|
458 |
+
if use_cache:
|
459 |
+
logger.warn(
|
460 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
461 |
+
)
|
462 |
+
use_cache = False
|
463 |
+
|
464 |
+
def create_custom_forward(module):
|
465 |
+
def custom_forward(*inputs):
|
466 |
+
return module(*inputs, past_key_value, output_attentions)
|
467 |
+
|
468 |
+
return custom_forward
|
469 |
+
|
470 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
471 |
+
create_custom_forward(layer_module),
|
472 |
+
hidden_states,
|
473 |
+
attention_mask,
|
474 |
+
layer_head_mask,
|
475 |
+
encoder_hidden_states,
|
476 |
+
encoder_attention_mask,
|
477 |
+
mode=mode,
|
478 |
+
)
|
479 |
+
else:
|
480 |
+
layer_outputs = layer_module(
|
481 |
+
hidden_states,
|
482 |
+
attention_mask,
|
483 |
+
layer_head_mask,
|
484 |
+
encoder_hidden_states,
|
485 |
+
encoder_attention_mask,
|
486 |
+
past_key_value,
|
487 |
+
output_attentions,
|
488 |
+
mode=mode,
|
489 |
+
)
|
490 |
+
|
491 |
+
hidden_states = layer_outputs[0]
|
492 |
+
if use_cache:
|
493 |
+
next_decoder_cache += (layer_outputs[-1],)
|
494 |
+
if output_attentions:
|
495 |
+
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
496 |
+
|
497 |
+
if output_hidden_states:
|
498 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
499 |
+
|
500 |
+
if not return_dict:
|
501 |
+
return tuple(
|
502 |
+
v
|
503 |
+
for v in [
|
504 |
+
hidden_states,
|
505 |
+
next_decoder_cache,
|
506 |
+
all_hidden_states,
|
507 |
+
all_self_attentions,
|
508 |
+
all_cross_attentions,
|
509 |
+
]
|
510 |
+
if v is not None
|
511 |
+
)
|
512 |
+
return BaseModelOutputWithPastAndCrossAttentions(
|
513 |
+
last_hidden_state=hidden_states,
|
514 |
+
past_key_values=next_decoder_cache,
|
515 |
+
hidden_states=all_hidden_states,
|
516 |
+
attentions=all_self_attentions,
|
517 |
+
cross_attentions=all_cross_attentions,
|
518 |
+
)
|
519 |
+
|
520 |
+
|
521 |
+
class BertPooler(nn.Module):
|
522 |
+
def __init__(self, config):
|
523 |
+
super().__init__()
|
524 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
525 |
+
self.activation = nn.Tanh()
|
526 |
+
|
527 |
+
def forward(self, hidden_states):
|
528 |
+
# We "pool" the model by simply taking the hidden state corresponding
|
529 |
+
# to the first token.
|
530 |
+
first_token_tensor = hidden_states[:, 0]
|
531 |
+
pooled_output = self.dense(first_token_tensor)
|
532 |
+
pooled_output = self.activation(pooled_output)
|
533 |
+
return pooled_output
|
534 |
+
|
535 |
+
|
536 |
+
class BertPredictionHeadTransform(nn.Module):
|
537 |
+
def __init__(self, config):
|
538 |
+
super().__init__()
|
539 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
540 |
+
if isinstance(config.hidden_act, str):
|
541 |
+
self.transform_act_fn = ACT2FN[config.hidden_act]
|
542 |
+
else:
|
543 |
+
self.transform_act_fn = config.hidden_act
|
544 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
545 |
+
|
546 |
+
def forward(self, hidden_states):
|
547 |
+
hidden_states = self.dense(hidden_states)
|
548 |
+
hidden_states = self.transform_act_fn(hidden_states)
|
549 |
+
hidden_states = self.LayerNorm(hidden_states)
|
550 |
+
return hidden_states
|
551 |
+
|
552 |
+
|
553 |
+
class BertLMPredictionHead(nn.Module):
|
554 |
+
def __init__(self, config):
|
555 |
+
super().__init__()
|
556 |
+
self.transform = BertPredictionHeadTransform(config)
|
557 |
+
|
558 |
+
# The output weights are the same as the input embeddings, but there is
|
559 |
+
# an output-only bias for each token.
|
560 |
+
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
561 |
+
|
562 |
+
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
|
563 |
+
|
564 |
+
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
|
565 |
+
self.decoder.bias = self.bias
|
566 |
+
|
567 |
+
def forward(self, hidden_states):
|
568 |
+
hidden_states = self.transform(hidden_states)
|
569 |
+
hidden_states = self.decoder(hidden_states)
|
570 |
+
return hidden_states
|
571 |
+
|
572 |
+
|
573 |
+
class BertOnlyMLMHead(nn.Module):
|
574 |
+
def __init__(self, config):
|
575 |
+
super().__init__()
|
576 |
+
self.predictions = BertLMPredictionHead(config)
|
577 |
+
|
578 |
+
def forward(self, sequence_output):
|
579 |
+
prediction_scores = self.predictions(sequence_output)
|
580 |
+
return prediction_scores
|
581 |
+
|
582 |
+
|
583 |
+
class BertPreTrainedModel(PreTrainedModel):
|
584 |
+
"""
|
585 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
586 |
+
models.
|
587 |
+
"""
|
588 |
+
|
589 |
+
config_class = BertConfig
|
590 |
+
base_model_prefix = "bert"
|
591 |
+
_keys_to_ignore_on_load_missing = [r"position_ids"]
|
592 |
+
|
593 |
+
def _init_weights(self, module):
|
594 |
+
""" Initialize the weights """
|
595 |
+
if isinstance(module, (nn.Linear, nn.Embedding)):
|
596 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
597 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
598 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
599 |
+
elif isinstance(module, nn.LayerNorm):
|
600 |
+
module.bias.data.zero_()
|
601 |
+
module.weight.data.fill_(1.0)
|
602 |
+
if isinstance(module, nn.Linear) and module.bias is not None:
|
603 |
+
module.bias.data.zero_()
|
604 |
+
|
605 |
+
|
606 |
+
class BertModel(BertPreTrainedModel):
|
607 |
+
"""
|
608 |
+
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
|
609 |
+
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
|
610 |
+
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
|
611 |
+
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
|
612 |
+
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
|
613 |
+
input to the forward pass.
|
614 |
+
"""
|
615 |
+
|
616 |
+
def __init__(self, config, add_pooling_layer=True):
|
617 |
+
super().__init__(config)
|
618 |
+
self.config = config
|
619 |
+
|
620 |
+
self.embeddings = BertEmbeddings(config)
|
621 |
+
|
622 |
+
self.encoder = BertEncoder(config)
|
623 |
+
|
624 |
+
self.pooler = BertPooler(config) if add_pooling_layer else None
|
625 |
+
|
626 |
+
self.init_weights()
|
627 |
+
|
628 |
+
|
629 |
+
def get_input_embeddings(self):
|
630 |
+
return self.embeddings.word_embeddings
|
631 |
+
|
632 |
+
def set_input_embeddings(self, value):
|
633 |
+
self.embeddings.word_embeddings = value
|
634 |
+
|
635 |
+
def _prune_heads(self, heads_to_prune):
|
636 |
+
"""
|
637 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
638 |
+
class PreTrainedModel
|
639 |
+
"""
|
640 |
+
for layer, heads in heads_to_prune.items():
|
641 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
642 |
+
|
643 |
+
|
644 |
+
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
|
645 |
+
"""
|
646 |
+
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
|
647 |
+
|
648 |
+
Arguments:
|
649 |
+
attention_mask (:obj:`torch.Tensor`):
|
650 |
+
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
|
651 |
+
input_shape (:obj:`Tuple[int]`):
|
652 |
+
The shape of the input to the model.
|
653 |
+
device: (:obj:`torch.device`):
|
654 |
+
The device of the input to the model.
|
655 |
+
|
656 |
+
Returns:
|
657 |
+
:obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
|
658 |
+
"""
|
659 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
660 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
661 |
+
if attention_mask.dim() == 3:
|
662 |
+
extended_attention_mask = attention_mask[:, None, :, :]
|
663 |
+
elif attention_mask.dim() == 2:
|
664 |
+
# Provided a padding mask of dimensions [batch_size, seq_length]
|
665 |
+
# - if the model is a decoder, apply a causal mask in addition to the padding mask
|
666 |
+
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
667 |
+
if is_decoder:
|
668 |
+
batch_size, seq_length = input_shape
|
669 |
+
|
670 |
+
seq_ids = torch.arange(seq_length, device=device)
|
671 |
+
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
|
672 |
+
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
|
673 |
+
# causal and attention masks must have same type with pytorch version < 1.3
|
674 |
+
causal_mask = causal_mask.to(attention_mask.dtype)
|
675 |
+
|
676 |
+
if causal_mask.shape[1] < attention_mask.shape[1]:
|
677 |
+
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
|
678 |
+
causal_mask = torch.cat(
|
679 |
+
[
|
680 |
+
torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
|
681 |
+
causal_mask,
|
682 |
+
],
|
683 |
+
axis=-1,
|
684 |
+
)
|
685 |
+
|
686 |
+
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
|
687 |
+
else:
|
688 |
+
extended_attention_mask = attention_mask[:, None, None, :]
|
689 |
+
else:
|
690 |
+
raise ValueError(
|
691 |
+
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
|
692 |
+
input_shape, attention_mask.shape
|
693 |
+
)
|
694 |
+
)
|
695 |
+
|
696 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
697 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
698 |
+
# positions we want to attend and -10000.0 for masked positions.
|
699 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
700 |
+
# effectively the same as removing these entirely.
|
701 |
+
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
702 |
+
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
703 |
+
return extended_attention_mask
|
704 |
+
|
705 |
+
def forward(
|
706 |
+
self,
|
707 |
+
input_ids=None,
|
708 |
+
attention_mask=None,
|
709 |
+
position_ids=None,
|
710 |
+
head_mask=None,
|
711 |
+
inputs_embeds=None,
|
712 |
+
encoder_embeds=None,
|
713 |
+
encoder_hidden_states=None,
|
714 |
+
encoder_attention_mask=None,
|
715 |
+
past_key_values=None,
|
716 |
+
use_cache=None,
|
717 |
+
output_attentions=None,
|
718 |
+
output_hidden_states=None,
|
719 |
+
return_dict=None,
|
720 |
+
is_decoder=False,
|
721 |
+
mode='multimodal',
|
722 |
+
):
|
723 |
+
r"""
|
724 |
+
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
|
725 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
726 |
+
the model is configured as a decoder.
|
727 |
+
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
728 |
+
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
729 |
+
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
|
730 |
+
- 1 for tokens that are **not masked**,
|
731 |
+
- 0 for tokens that are **masked**.
|
732 |
+
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
733 |
+
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
734 |
+
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
|
735 |
+
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
|
736 |
+
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
|
737 |
+
use_cache (:obj:`bool`, `optional`):
|
738 |
+
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
|
739 |
+
decoding (see :obj:`past_key_values`).
|
740 |
+
"""
|
741 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
742 |
+
output_hidden_states = (
|
743 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
744 |
+
)
|
745 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
746 |
+
|
747 |
+
if is_decoder:
|
748 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
749 |
+
else:
|
750 |
+
use_cache = False
|
751 |
+
|
752 |
+
if input_ids is not None and inputs_embeds is not None:
|
753 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
754 |
+
elif input_ids is not None:
|
755 |
+
input_shape = input_ids.size()
|
756 |
+
batch_size, seq_length = input_shape
|
757 |
+
device = input_ids.device
|
758 |
+
elif inputs_embeds is not None:
|
759 |
+
input_shape = inputs_embeds.size()[:-1]
|
760 |
+
batch_size, seq_length = input_shape
|
761 |
+
device = inputs_embeds.device
|
762 |
+
elif encoder_embeds is not None:
|
763 |
+
input_shape = encoder_embeds.size()[:-1]
|
764 |
+
batch_size, seq_length = input_shape
|
765 |
+
device = encoder_embeds.device
|
766 |
+
else:
|
767 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
|
768 |
+
|
769 |
+
# past_key_values_length
|
770 |
+
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
|
771 |
+
|
772 |
+
if attention_mask is None:
|
773 |
+
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
|
774 |
+
|
775 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
776 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
777 |
+
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
|
778 |
+
device, is_decoder)
|
779 |
+
|
780 |
+
# If a 2D or 3D attention mask is provided for the cross-attention
|
781 |
+
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
782 |
+
if encoder_hidden_states is not None:
|
783 |
+
if type(encoder_hidden_states) == list:
|
784 |
+
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
|
785 |
+
else:
|
786 |
+
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
787 |
+
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
788 |
+
|
789 |
+
if type(encoder_attention_mask) == list:
|
790 |
+
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
|
791 |
+
elif encoder_attention_mask is None:
|
792 |
+
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
793 |
+
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
794 |
+
else:
|
795 |
+
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
796 |
+
else:
|
797 |
+
encoder_extended_attention_mask = None
|
798 |
+
|
799 |
+
# Prepare head mask if needed
|
800 |
+
# 1.0 in head_mask indicate we keep the head
|
801 |
+
# attention_probs has shape bsz x n_heads x N x N
|
802 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
803 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
804 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
805 |
+
|
806 |
+
if encoder_embeds is None:
|
807 |
+
embedding_output = self.embeddings(
|
808 |
+
input_ids=input_ids,
|
809 |
+
position_ids=position_ids,
|
810 |
+
inputs_embeds=inputs_embeds,
|
811 |
+
past_key_values_length=past_key_values_length,
|
812 |
+
)
|
813 |
+
else:
|
814 |
+
embedding_output = encoder_embeds
|
815 |
+
|
816 |
+
encoder_outputs = self.encoder(
|
817 |
+
embedding_output,
|
818 |
+
attention_mask=extended_attention_mask,
|
819 |
+
head_mask=head_mask,
|
820 |
+
encoder_hidden_states=encoder_hidden_states,
|
821 |
+
encoder_attention_mask=encoder_extended_attention_mask,
|
822 |
+
past_key_values=past_key_values,
|
823 |
+
use_cache=use_cache,
|
824 |
+
output_attentions=output_attentions,
|
825 |
+
output_hidden_states=output_hidden_states,
|
826 |
+
return_dict=return_dict,
|
827 |
+
mode=mode,
|
828 |
+
)
|
829 |
+
sequence_output = encoder_outputs[0]
|
830 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
831 |
+
|
832 |
+
if not return_dict:
|
833 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
834 |
+
|
835 |
+
return BaseModelOutputWithPoolingAndCrossAttentions(
|
836 |
+
last_hidden_state=sequence_output,
|
837 |
+
pooler_output=pooled_output,
|
838 |
+
past_key_values=encoder_outputs.past_key_values,
|
839 |
+
hidden_states=encoder_outputs.hidden_states,
|
840 |
+
attentions=encoder_outputs.attentions,
|
841 |
+
cross_attentions=encoder_outputs.cross_attentions,
|
842 |
+
)
|
843 |
+
|
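For orientation (not part of the diff): in this nlvr_encoder variant, cross-attention layers carry twin attention modules (self0/self1) whose outputs BertSelfOutput averages or merges, so encoder_hidden_states and encoder_attention_mask can be passed as two-element lists. The following is a rough sketch of exercising that path; it assumes the BertSelfAttention defined earlier in the file sizes its cross-attention key/value projections from config.encoder_width, and all shapes and config values are illustrative.

# Rough sketch, not part of the diff: drive the twin cross-attention path with
# two image-feature tensors. Config values and shapes are illustrative assumptions.
import torch
from transformers import BertConfig

config = BertConfig()                          # bert-base sized defaults
config.add_cross_attention = True              # build the twin cross-attention layers
config.encoder_width = config.hidden_size      # assumed: k/v width read by BertSelfAttention

model = BertModel(config=config, add_pooling_layer=False)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (1, 12))
attention_mask = torch.ones(1, 12, dtype=torch.long)

# Two visual feature maps, e.g. ViT outputs of shape (batch, 1 + num_patches, hidden)
image0 = torch.randn(1, 197, config.hidden_size)
image1 = torch.randn(1, 197, config.hidden_size)
image_atts = torch.ones(1, 197, dtype=torch.long)

with torch.no_grad():
    out = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        encoder_hidden_states=[image0, image1],          # lists are routed to self0 / self1
        encoder_attention_mask=[image_atts, image_atts],
        return_dict=True,
    )
print(out.last_hidden_state.shape)                       # torch.Size([1, 12, 768])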
extras/BLIP/models/vit.py
ADDED
@@ -0,0 +1,308 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
 * Based on timm code base
 * https://github.com/rwightman/pytorch-image-models/tree/master/timm
'''

import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial

from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
from timm.models.helpers import named_apply, adapt_input_conv


def checkpoint_wrapper(x):
    return x


class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_gradients = None
        self.attention_map = None

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def forward(self, x, register_hook=False):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        if register_hook:
            self.save_attention_map(attn)
            attn.register_hook(self.save_attn_gradients)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if use_grad_checkpointing:
            self.attn = checkpoint_wrapper(self.attn)
            self.mlp = checkpoint_wrapper(self.mlp)

    def forward(self, x, register_hook=False):
        x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
        https://arxiv.org/abs/2010.11929
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
                 use_grad_checkpointing=False, ckpt_layer=0):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)

        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)
            )
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def forward(self, x, register_blk=-1):
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

        x = x + self.pos_embed[:,:x.size(1),:]
        x = self.pos_drop(x)

        for i,blk in enumerate(self.blocks):
            x = blk(x, register_blk==i)
        x = self.norm(x)

        return x

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        _load_weights(self, checkpoint_path, prefix)


@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation
    """
    import numpy as np

    def _n2p(w, t=True):
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and 'opt/target/embedding/kernel' in w:
        prefix = 'opt/target/'

    if hasattr(model.patch_embed, 'backbone'):
        # hybrid
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'
                    for r in range(3):
                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
    else:
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
#     if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
#         model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
#         model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
#     if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
#         model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
#         model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))


def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    # interpolate position embedding
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
    # height (== width) for the checkpoint position embedding
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    # height (== width) for the new position embedding
    new_size = int(num_patches ** 0.5)

    if orig_size!=new_size:
        # class_token and dist_token are kept unchanged
        extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
        # only the position tokens are interpolated
        pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
        pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
        pos_tokens = torch.nn.functional.interpolate(
            pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
        pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
        new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
        print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))

        return new_pos_embed
    else:
        return pos_embed_checkpoint
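A rough usage sketch (not part of the diff): instantiating the VisionTransformer defined above with ViT-B/16 sized hyper-parameters and running a dummy batch through it. It only needs timm for PatchEmbed; the input size is illustrative.

# Rough sketch, not part of the diff: forward a dummy image batch through the ViT.
import torch

vit = VisionTransformer(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12)
vit.eval()

images = torch.randn(2, 3, 224, 224)       # (batch, channels, height, width)
with torch.no_grad():
    tokens = vit(images)                   # cls token + 14*14 patch tokens
print(tokens.shape)                        # torch.Size([2, 197, 768])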
extras/expansion.py
ADDED
@@ -0,0 +1,126 @@
# Fooocus GPT2 Expansion
# Algorithm created by Lvmin Zhang at 2023, Stanford
# If used inside Fooocus, any use is permitted.
# If used outside Fooocus, only non-commercial use is permitted (CC-By NC 4.0).
# This applies to the word list, vocab, model, and algorithm.


import os
import torch
import math
import ldm_patched.modules.model_management as model_management

from transformers.generation.logits_process import LogitsProcessorList
from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
from modules.config import path_fooocus_expansion
from ldm_patched.modules.model_patcher import ModelPatcher


# limitation of np.random.seed(), called from transformers.set_seed()
SEED_LIMIT_NUMPY = 2**32
neg_inf = - 8192.0


def safe_str(x):
    x = str(x)
    for _ in range(16):
        x = x.replace('  ', ' ')
    return x.strip(",. \r\n")


def remove_pattern(x, pattern):
    for p in pattern:
        x = x.replace(p, '')
    return x


class FooocusExpansion:
    def __init__(self):
        self.tokenizer = AutoTokenizer.from_pretrained(path_fooocus_expansion)

        positive_words = open(os.path.join(path_fooocus_expansion, 'positive.txt'),
                              encoding='utf-8').read().splitlines()
        positive_words = ['Ġ' + x.lower() for x in positive_words if x != '']

        self.logits_bias = torch.zeros((1, len(self.tokenizer.vocab)), dtype=torch.float32) + neg_inf

        debug_list = []
        for k, v in self.tokenizer.vocab.items():
            if k in positive_words:
                self.logits_bias[0, v] = 0
                debug_list.append(k[1:])

        print(f'Fooocus V2 Expansion: Vocab with {len(debug_list)} words.')

        # debug_list = '\n'.join(sorted(debug_list))
        # print(debug_list)

        # t11 = self.tokenizer(',', return_tensors="np")
        # t198 = self.tokenizer('\n', return_tensors="np")
        # eos = self.tokenizer.eos_token_id

        self.model = AutoModelForCausalLM.from_pretrained(path_fooocus_expansion)
        self.model.eval()

        load_device = model_management.text_encoder_device()
        offload_device = model_management.text_encoder_offload_device()

        # MPS hack
        if model_management.is_device_mps(load_device):
            load_device = torch.device('cpu')
            offload_device = torch.device('cpu')

        use_fp16 = model_management.should_use_fp16(device=load_device)

        if use_fp16:
            self.model.half()

        self.patcher = ModelPatcher(self.model, load_device=load_device, offload_device=offload_device)
        print(f'Fooocus Expansion engine loaded for {load_device}, use_fp16 = {use_fp16}.')

    @torch.no_grad()
    @torch.inference_mode()
    def logits_processor(self, input_ids, scores):
        assert scores.ndim == 2 and scores.shape[0] == 1
        self.logits_bias = self.logits_bias.to(scores)

        bias = self.logits_bias.clone()
        bias[0, input_ids[0].to(bias.device).long()] = neg_inf
        bias[0, 11] = 0

        return scores + bias

    @torch.no_grad()
    @torch.inference_mode()
    def __call__(self, prompt, seed):
        if prompt == '':
            return ''

        if self.patcher.current_device != self.patcher.load_device:
            print('Fooocus Expansion loaded by itself.')
            model_management.load_model_gpu(self.patcher)

        seed = int(seed) % SEED_LIMIT_NUMPY
        set_seed(seed)
        prompt = safe_str(prompt) + ','

        tokenized_kwargs = self.tokenizer(prompt, return_tensors="pt")
        tokenized_kwargs.data['input_ids'] = tokenized_kwargs.data['input_ids'].to(self.patcher.load_device)
        tokenized_kwargs.data['attention_mask'] = tokenized_kwargs.data['attention_mask'].to(self.patcher.load_device)

        current_token_length = int(tokenized_kwargs.data['input_ids'].shape[1])
        max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
        max_new_tokens = max_token_length - current_token_length

        # https://huggingface.co/blog/introducing-csearch
        # https://huggingface.co/docs/transformers/generation_strategies
        features = self.model.generate(**tokenized_kwargs,
                                       top_k=100,
                                       max_new_tokens=max_new_tokens,
                                       do_sample=True,
                                       logits_processor=LogitsProcessorList([self.logits_processor]))

        response = self.tokenizer.batch_decode(features, skip_special_tokens=True)
        result = safe_str(response[0])

        return result
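A rough usage sketch (not part of the diff): expanding a short prompt with the GPT-2 expansion engine above. It assumes a working Fooocus environment where modules.config and ldm_patched are importable and the expansion model files are present; the printed output is only an illustration of the kind of text produced.

# Rough sketch, not part of the diff: one-off prompt expansion inside Fooocus.
from extras.expansion import FooocusExpansion

expansion = FooocusExpansion()                 # loads tokenizer, word list and GPT-2 weights
print(expansion('a photo of a cat', seed=12345))
# e.g. 'a photo of a cat, highly detailed, sharp focus, cinematic light, ...'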
extras/face_crop.py
ADDED
@@ -0,0 +1,50 @@
import cv2
import numpy as np
import modules.config


faceRestoreHelper = None


def align_warp_face(self, landmark, border_mode='constant'):
    affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
    self.affine_matrices.append(affine_matrix)
    if border_mode == 'constant':
        border_mode = cv2.BORDER_CONSTANT
    elif border_mode == 'reflect101':
        border_mode = cv2.BORDER_REFLECT101
    elif border_mode == 'reflect':
        border_mode = cv2.BORDER_REFLECT
    input_img = self.input_img
    cropped_face = cv2.warpAffine(input_img, affine_matrix, self.face_size,
                                  borderMode=border_mode, borderValue=(135, 133, 132))
    return cropped_face


def crop_image(img_rgb):
    global faceRestoreHelper

    if faceRestoreHelper is None:
        from extras.facexlib.utils.face_restoration_helper import FaceRestoreHelper
        faceRestoreHelper = FaceRestoreHelper(
            upscale_factor=1,
            model_rootpath=modules.config.path_controlnet,
            device='cpu'  # use cpu is safer since we are out of memory management
        )

    faceRestoreHelper.clean_all()
    faceRestoreHelper.read_image(np.ascontiguousarray(img_rgb[:, :, ::-1].copy()))
    faceRestoreHelper.get_face_landmarks_5()

    landmarks = faceRestoreHelper.all_landmarks_5
    # landmarks are already sorted with confidence.

    if len(landmarks) == 0:
        print('No face detected')
        return img_rgb
    else:
        print(f'Detected {len(landmarks)} faces')

    result = align_warp_face(faceRestoreHelper, landmarks[0])

    return np.ascontiguousarray(result[:, :, ::-1].copy())
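A rough usage sketch (not part of the diff): cropping the most confident face from an image with crop_image above. It assumes a Fooocus environment (modules.config resolvable, facexlib detection weights downloadable); the file name is illustrative. Note that crop_image takes and returns RGB arrays, converting to BGR internally for facexlib.

# Rough sketch, not part of the diff: aligned face crop from an RGB image.
import cv2
from extras.face_crop import crop_image

img_bgr = cv2.imread('portrait.jpg')              # illustrative path; OpenCV loads BGR
img_rgb = img_bgr[:, :, ::-1]                     # crop_image expects RGB
face_rgb = crop_image(img_rgb)                    # aligned crop, or the input if no face found
cv2.imwrite('face.png', face_rgb[:, :, ::-1])     # back to BGR for saving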
extras/facexlib/detection/__init__.py
ADDED
@@ -0,0 +1,31 @@
import torch
from copy import deepcopy

from extras.facexlib.utils import load_file_from_url
from .retinaface import RetinaFace


def init_detection_model(model_name, half=False, device='cuda', model_rootpath=None):
    if model_name == 'retinaface_resnet50':
        model = RetinaFace(network_name='resnet50', half=half, device=device)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth'
    elif model_name == 'retinaface_mobile0.25':
        model = RetinaFace(network_name='mobile0.25', half=half, device=device)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)

    # TODO: clean pretrained model
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    # remove unnecessary 'module.'
    for k, v in deepcopy(load_net).items():
        if k.startswith('module.'):
            load_net[k[7:]] = v
            load_net.pop(k)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)
    return model
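A rough usage sketch (not part of the diff): loading the RetinaFace detector through init_detection_model and running it on one image. It assumes the package layout from this diff, that the weights can be downloaded, and that RetinaFace.detect_faces behaves as in upstream facexlib (that method is defined in retinaface.py, not shown here).

# Rough sketch, not part of the diff: detect faces with the bundled RetinaFace wrapper.
import cv2
import torch
from extras.facexlib.detection import init_detection_model

det_net = init_detection_model('retinaface_resnet50', half=False, device='cpu')
img_bgr = cv2.imread('portrait.jpg')                          # illustrative path
with torch.no_grad():
    bboxes = det_net.detect_faces(img_bgr, conf_threshold=0.97)   # assumed upstream API: box, score, 5 landmarks per row
print(len(bboxes), 'faces found')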
extras/facexlib/detection/align_trans.py
ADDED
@@ -0,0 +1,219 @@
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
from .matlab_cp2tform import get_similarity_transform_for_cv2
|
5 |
+
|
6 |
+
# reference facial points, a list of coordinates (x,y)
|
7 |
+
REFERENCE_FACIAL_POINTS = [[30.29459953, 51.69630051], [65.53179932, 51.50139999], [48.02519989, 71.73660278],
|
8 |
+
[33.54930115, 92.3655014], [62.72990036, 92.20410156]]
|
9 |
+
|
10 |
+
DEFAULT_CROP_SIZE = (96, 112)
|
11 |
+
|
12 |
+
|
13 |
+
class FaceWarpException(Exception):
|
14 |
+
|
15 |
+
def __str__(self):
|
16 |
+
return 'In File {}:{}'.format(__file__, super.__str__(self))
|
17 |
+
|
18 |
+
|
19 |
+
def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):
|
20 |
+
"""
|
21 |
+
Function:
|
22 |
+
----------
|
23 |
+
get reference 5 key points according to crop settings:
|
24 |
+
0. Set default crop_size:
|
25 |
+
if default_square:
|
26 |
+
crop_size = (112, 112)
|
27 |
+
else:
|
28 |
+
crop_size = (96, 112)
|
29 |
+
1. Pad the crop_size by inner_padding_factor in each side;
|
30 |
+
2. Resize crop_size into (output_size - outer_padding*2),
|
31 |
+
pad into output_size with outer_padding;
|
32 |
+
3. Output reference_5point;
|
33 |
+
Parameters:
|
34 |
+
----------
|
35 |
+
@output_size: (w, h) or None
|
36 |
+
size of aligned face image
|
37 |
+
@inner_padding_factor: (w_factor, h_factor)
|
38 |
+
padding factor for inner (w, h)
|
39 |
+
@outer_padding: (w_pad, h_pad)
|
40 |
+
each row is a pair of coordinates (x, y)
|
41 |
+
@default_square: True or False
|
42 |
+
if True:
|
43 |
+
default crop_size = (112, 112)
|
44 |
+
else:
|
45 |
+
default crop_size = (96, 112);
|
46 |
+
!!! make sure, if output_size is not None:
|
47 |
+
(output_size - outer_padding)
|
48 |
+
= some_scale * (default crop_size * (1.0 +
|
49 |
+
inner_padding_factor))
|
50 |
+
Returns:
|
51 |
+
----------
|
52 |
+
@reference_5point: 5x2 np.array
|
53 |
+
each row is a pair of transformed coordinates (x, y)
|
54 |
+
"""
|
55 |
+
|
56 |
+
tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
|
57 |
+
tmp_crop_size = np.array(DEFAULT_CROP_SIZE)
|
58 |
+
|
59 |
+
# 0) make the inner region a square
|
60 |
+
if default_square:
|
61 |
+
size_diff = max(tmp_crop_size) - tmp_crop_size
|
62 |
+
tmp_5pts += size_diff / 2
|
63 |
+
tmp_crop_size += size_diff
|
64 |
+
|
65 |
+
if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]):
|
66 |
+
|
67 |
+
return tmp_5pts
|
68 |
+
|
69 |
+
if (inner_padding_factor == 0 and outer_padding == (0, 0)):
|
70 |
+
if output_size is None:
|
71 |
+
return tmp_5pts
|
72 |
+
else:
|
73 |
+
raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size))
|
74 |
+
|
75 |
+
# check output size
|
76 |
+
if not (0 <= inner_padding_factor <= 1.0):
|
77 |
+
raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')
|
78 |
+
|
79 |
+
if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None):
|
80 |
+
output_size = tmp_crop_size * \
|
81 |
+
(1 + inner_padding_factor * 2).astype(np.int32)
|
82 |
+
output_size += np.array(outer_padding)
|
83 |
+
if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):
|
84 |
+
raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])')
|
85 |
+
|
86 |
+
# 1) pad the inner region according inner_padding_factor
|
87 |
+
if inner_padding_factor > 0:
|
88 |
+
size_diff = tmp_crop_size * inner_padding_factor * 2
|
89 |
+
tmp_5pts += size_diff / 2
|
90 |
+
tmp_crop_size += np.round(size_diff).astype(np.int32)
|
91 |
+
|
92 |
+
# 2) resize the padded inner region
|
93 |
+
size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
|
94 |
+
|
95 |
+
if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
|
96 |
+
raise FaceWarpException('Must have (output_size - outer_padding)'
|
97 |
+
'= some_scale * (crop_size * (1.0 + inner_padding_factor)')
|
98 |
+
|
99 |
+
scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
|
100 |
+
tmp_5pts = tmp_5pts * scale_factor
|
101 |
+
# size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
|
102 |
+
# tmp_5pts = tmp_5pts + size_diff / 2
|
103 |
+
tmp_crop_size = size_bf_outer_pad
|
104 |
+
|
105 |
+
# 3) add outer_padding to make output_size
|
106 |
+
reference_5point = tmp_5pts + np.array(outer_padding)
|
107 |
+
tmp_crop_size = output_size
|
108 |
+
|
109 |
+
return reference_5point
|
110 |
+
|
111 |
+
|
112 |
+
def get_affine_transform_matrix(src_pts, dst_pts):
|
113 |
+
"""
|
114 |
+
Function:
|
115 |
+
----------
|
116 |
+
get affine transform matrix 'tfm' from src_pts to dst_pts
|
117 |
+
Parameters:
|
118 |
+
----------
|
119 |
+
@src_pts: Kx2 np.array
|
120 |
+
source points matrix, each row is a pair of coordinates (x, y)
|
121 |
+
@dst_pts: Kx2 np.array
|
122 |
+
destination points matrix, each row is a pair of coordinates (x, y)
|
123 |
+
Returns:
|
124 |
+
----------
|
125 |
+
@tfm: 2x3 np.array
|
126 |
+
transform matrix from src_pts to dst_pts
|
127 |
+
"""
|
128 |
+
|
129 |
+
tfm = np.float32([[1, 0, 0], [0, 1, 0]])
|
130 |
+
n_pts = src_pts.shape[0]
|
131 |
+
ones = np.ones((n_pts, 1), src_pts.dtype)
|
132 |
+
src_pts_ = np.hstack([src_pts, ones])
|
133 |
+
dst_pts_ = np.hstack([dst_pts, ones])
|
134 |
+
|
135 |
+
A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_)
|
136 |
+
|
137 |
+
if rank == 3:
|
138 |
+
tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]])
|
139 |
+
elif rank == 2:
|
140 |
+
tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]])
|
141 |
+
|
142 |
+
return tfm
|
143 |
+
|
144 |
+
|
145 |
+
def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity'):
|
146 |
+
"""
|
147 |
+
Function:
|
148 |
+
----------
|
149 |
+
apply affine transform 'trans' to uv
|
150 |
+
Parameters:
|
151 |
+
----------
|
152 |
+
@src_img: 3x3 np.array
|
153 |
+
input image
|
154 |
+
@facial_pts: could be
|
155 |
+
1)a list of K coordinates (x,y)
|
156 |
+
or
|
157 |
+
2) Kx2 or 2xK np.array
|
158 |
+
each row or col is a pair of coordinates (x, y)
|
159 |
+
@reference_pts: could be
|
160 |
+
1) a list of K coordinates (x,y)
|
161 |
+
or
|
162 |
+
2) Kx2 or 2xK np.array
|
163 |
+
each row or col is a pair of coordinates (x, y)
|
164 |
+
or
|
165 |
+
3) None
|
166 |
+
if None, use default reference facial points
|
167 |
+
@crop_size: (w, h)
|
168 |
+
output face image size
|
169 |
+
@align_type: transform type, could be one of
|
170 |
+
1) 'similarity': use similarity transform
|
171 |
+
2) 'cv2_affine': use the first 3 points to do affine transform,
|
172 |
+
by calling cv2.getAffineTransform()
|
173 |
+
3) 'affine': use all points to do affine transform
|
174 |
+
Returns:
|
175 |
+
----------
|
176 |
+
@face_img: output face image with size (w, h) = @crop_size
|
177 |
+
"""
|
178 |
+
|
179 |
+
if reference_pts is None:
|
180 |
+
if crop_size[0] == 96 and crop_size[1] == 112:
|
181 |
+
reference_pts = REFERENCE_FACIAL_POINTS
|
182 |
+
else:
|
183 |
+
default_square = False
|
184 |
+
inner_padding_factor = 0
|
185 |
+
outer_padding = (0, 0)
|
186 |
+
output_size = crop_size
|
187 |
+
|
188 |
+
reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding,
|
189 |
+
default_square)
|
190 |
+
|
191 |
+
ref_pts = np.float32(reference_pts)
|
192 |
+
ref_pts_shp = ref_pts.shape
|
193 |
+
if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
|
194 |
+
raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')
|
195 |
+
|
196 |
+
if ref_pts_shp[0] == 2:
|
197 |
+
ref_pts = ref_pts.T
|
198 |
+
|
199 |
+
src_pts = np.float32(facial_pts)
|
200 |
+
src_pts_shp = src_pts.shape
|
201 |
+
if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
|
202 |
+
raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')
|
203 |
+
|
204 |
+
if src_pts_shp[0] == 2:
|
205 |
+
src_pts = src_pts.T
|
206 |
+
|
207 |
+
if src_pts.shape != ref_pts.shape:
|
208 |
+
raise FaceWarpException('facial_pts and reference_pts must have the same shape')
|
209 |
+
|
210 |
+
if align_type == 'cv2_affine':
|
211 |
+
tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
|
212 |
+
elif align_type == 'affine':
|
213 |
+
tfm = get_affine_transform_matrix(src_pts, ref_pts)
|
214 |
+
else:
|
215 |
+
tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)
|
216 |
+
|
217 |
+
face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))
|
218 |
+
|
219 |
+
return face_img
|
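A minimal usage sketch for the alignment helpers above, mirroring how retinaface.py calls them; the zero image and the five landmark coordinates are placeholders for a real photo and detector output:

import numpy as np
import cv2

from extras.facexlib.detection.align_trans import get_reference_facial_points, warp_and_crop_face

src_img = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder BGR image
facial5points = np.array([[187.0, 239.0], [251.0, 243.0], [220.0, 294.0],
                          [192.0, 323.0], [245.0, 326.0]])  # placeholder (x, y) for eyes, nose, mouth corners

reference = get_reference_facial_points(default_square=True)  # 5x2 template for a square crop
aligned = warp_and_crop_face(src_img, facial5points, reference_pts=reference, crop_size=(112, 112))
print(aligned.shape)  # (112, 112, 3)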
extras/facexlib/detection/matlab_cp2tform.py
ADDED
@@ -0,0 +1,317 @@
1 |
+
import numpy as np
|
2 |
+
from numpy.linalg import inv, lstsq
|
3 |
+
from numpy.linalg import matrix_rank as rank
|
4 |
+
from numpy.linalg import norm
|
5 |
+
|
6 |
+
|
7 |
+
class MatlabCp2tormException(Exception):
|
8 |
+
|
9 |
+
def __str__(self):
|
10 |
+
return 'In File {}:{}'.format(__file__, super().__str__())
|
11 |
+
|
12 |
+
|
13 |
+
def tformfwd(trans, uv):
|
14 |
+
"""
|
15 |
+
Function:
|
16 |
+
----------
|
17 |
+
apply affine transform 'trans' to uv
|
18 |
+
|
19 |
+
Parameters:
|
20 |
+
----------
|
21 |
+
@trans: 3x3 np.array
|
22 |
+
transform matrix
|
23 |
+
@uv: Kx2 np.array
|
24 |
+
each row is a pair of coordinates (x, y)
|
25 |
+
|
26 |
+
Returns:
|
27 |
+
----------
|
28 |
+
@xy: Kx2 np.array
|
29 |
+
each row is a pair of transformed coordinates (x, y)
|
30 |
+
"""
|
31 |
+
uv = np.hstack((uv, np.ones((uv.shape[0], 1))))
|
32 |
+
xy = np.dot(uv, trans)
|
33 |
+
xy = xy[:, 0:-1]
|
34 |
+
return xy
|
35 |
+
|
36 |
+
|
37 |
+
def tforminv(trans, uv):
|
38 |
+
"""
|
39 |
+
Function:
|
40 |
+
----------
|
41 |
+
apply the inverse of affine transform 'trans' to uv
|
42 |
+
|
43 |
+
Parameters:
|
44 |
+
----------
|
45 |
+
@trans: 3x3 np.array
|
46 |
+
transform matrix
|
47 |
+
@uv: Kx2 np.array
|
48 |
+
each row is a pair of coordinates (x, y)
|
49 |
+
|
50 |
+
Returns:
|
51 |
+
----------
|
52 |
+
@xy: Kx2 np.array
|
53 |
+
each row is a pair of inverse-transformed coordinates (x, y)
|
54 |
+
"""
|
55 |
+
Tinv = inv(trans)
|
56 |
+
xy = tformfwd(Tinv, uv)
|
57 |
+
return xy
|
58 |
+
|
59 |
+
|
60 |
+
def findNonreflectiveSimilarity(uv, xy, options=None):
|
61 |
+
if options is None: options = {'K': 2}
|
62 |
+
|
63 |
+
K = options['K']
|
64 |
+
M = xy.shape[0]
|
65 |
+
x = xy[:, 0].reshape((-1, 1)) # use reshape to keep a column vector
|
66 |
+
y = xy[:, 1].reshape((-1, 1)) # use reshape to keep a column vector
|
67 |
+
|
68 |
+
tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
|
69 |
+
tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
|
70 |
+
X = np.vstack((tmp1, tmp2))
|
71 |
+
|
72 |
+
u = uv[:, 0].reshape((-1, 1)) # use reshape to keep a column vector
|
73 |
+
v = uv[:, 1].reshape((-1, 1)) # use reshape to keep a column vector
|
74 |
+
U = np.vstack((u, v))
|
75 |
+
|
76 |
+
# We know that X * r = U
|
77 |
+
if rank(X) >= 2 * K:
|
78 |
+
r, _, _, _ = lstsq(X, U, rcond=-1)
|
79 |
+
r = np.squeeze(r)
|
80 |
+
else:
|
81 |
+
raise Exception('cp2tform:twoUniquePointsReq')
|
82 |
+
sc = r[0]
|
83 |
+
ss = r[1]
|
84 |
+
tx = r[2]
|
85 |
+
ty = r[3]
|
86 |
+
|
87 |
+
Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]])
|
88 |
+
T = inv(Tinv)
|
89 |
+
T[:, 2] = np.array([0, 0, 1])
|
90 |
+
|
91 |
+
return T, Tinv
|
92 |
+
|
93 |
+
|
94 |
+
def findSimilarity(uv, xy, options=None):
|
95 |
+
if options is None: options = {'K': 2}
|
96 |
+
|
97 |
+
# uv = np.array(uv)
|
98 |
+
# xy = np.array(xy)
|
99 |
+
|
100 |
+
# Solve for trans1
|
101 |
+
trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options)
|
102 |
+
|
103 |
+
# Solve for trans2
|
104 |
+
|
105 |
+
# manually reflect the xy data across the Y-axis
|
106 |
+
xyR = xy.copy()  # copy so the original xy is preserved for the norm comparison below
|
107 |
+
xyR[:, 0] = -1 * xyR[:, 0]
|
108 |
+
|
109 |
+
trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options)
|
110 |
+
|
111 |
+
# manually reflect the tform to undo the reflection done on xyR
|
112 |
+
TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
|
113 |
+
|
114 |
+
trans2 = np.dot(trans2r, TreflectY)
|
115 |
+
|
116 |
+
# Figure out if trans1 or trans2 is better
|
117 |
+
xy1 = tformfwd(trans1, uv)
|
118 |
+
norm1 = norm(xy1 - xy)
|
119 |
+
|
120 |
+
xy2 = tformfwd(trans2, uv)
|
121 |
+
norm2 = norm(xy2 - xy)
|
122 |
+
|
123 |
+
if norm1 <= norm2:
|
124 |
+
return trans1, trans1_inv
|
125 |
+
else:
|
126 |
+
trans2_inv = inv(trans2)
|
127 |
+
return trans2, trans2_inv
|
128 |
+
|
129 |
+
|
130 |
+
def get_similarity_transform(src_pts, dst_pts, reflective=True):
|
131 |
+
"""
|
132 |
+
Function:
|
133 |
+
----------
|
134 |
+
Find Similarity Transform Matrix 'trans':
|
135 |
+
u = src_pts[:, 0]
|
136 |
+
v = src_pts[:, 1]
|
137 |
+
x = dst_pts[:, 0]
|
138 |
+
y = dst_pts[:, 1]
|
139 |
+
[x, y, 1] = [u, v, 1] * trans
|
140 |
+
|
141 |
+
Parameters:
|
142 |
+
----------
|
143 |
+
@src_pts: Kx2 np.array
|
144 |
+
source points, each row is a pair of coordinates (x, y)
|
145 |
+
@dst_pts: Kx2 np.array
|
146 |
+
destination points, each row is a pair of transformed
|
147 |
+
coordinates (x, y)
|
148 |
+
@reflective: True or False
|
149 |
+
if True:
|
150 |
+
use reflective similarity transform
|
151 |
+
else:
|
152 |
+
use non-reflective similarity transform
|
153 |
+
|
154 |
+
Returns:
|
155 |
+
----------
|
156 |
+
@trans: 3x3 np.array
|
157 |
+
transform matrix from uv to xy
|
158 |
+
trans_inv: 3x3 np.array
|
159 |
+
inverse of trans, transform matrix from xy to uv
|
160 |
+
"""
|
161 |
+
|
162 |
+
if reflective:
|
163 |
+
trans, trans_inv = findSimilarity(src_pts, dst_pts)
|
164 |
+
else:
|
165 |
+
trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts)
|
166 |
+
|
167 |
+
return trans, trans_inv
|
168 |
+
|
169 |
+
|
170 |
+
def cvt_tform_mat_for_cv2(trans):
|
171 |
+
"""
|
172 |
+
Function:
|
173 |
+
----------
|
174 |
+
Convert Transform Matrix 'trans' into 'cv2_trans' which could be
|
175 |
+
directly used by cv2.warpAffine():
|
176 |
+
u = src_pts[:, 0]
|
177 |
+
v = src_pts[:, 1]
|
178 |
+
x = dst_pts[:, 0]
|
179 |
+
y = dst_pts[:, 1]
|
180 |
+
[x, y].T = cv_trans * [u, v, 1].T
|
181 |
+
|
182 |
+
Parameters:
|
183 |
+
----------
|
184 |
+
@trans: 3x3 np.array
|
185 |
+
transform matrix from uv to xy
|
186 |
+
|
187 |
+
Returns:
|
188 |
+
----------
|
189 |
+
@cv2_trans: 2x3 np.array
|
190 |
+
transform matrix from src_pts to dst_pts, could be directly used
|
191 |
+
for cv2.warpAffine()
|
192 |
+
"""
|
193 |
+
cv2_trans = trans[:, 0:2].T
|
194 |
+
|
195 |
+
return cv2_trans
|
196 |
+
|
197 |
+
|
198 |
+
def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True):
|
199 |
+
"""
|
200 |
+
Function:
|
201 |
+
----------
|
202 |
+
Find Similarity Transform Matrix 'cv2_trans' which could be
|
203 |
+
directly used by cv2.warpAffine():
|
204 |
+
u = src_pts[:, 0]
|
205 |
+
v = src_pts[:, 1]
|
206 |
+
x = dst_pts[:, 0]
|
207 |
+
y = dst_pts[:, 1]
|
208 |
+
[x, y].T = cv_trans * [u, v, 1].T
|
209 |
+
|
210 |
+
Parameters:
|
211 |
+
----------
|
212 |
+
@src_pts: Kx2 np.array
|
213 |
+
source points, each row is a pair of coordinates (x, y)
|
214 |
+
@dst_pts: Kx2 np.array
|
215 |
+
destination points, each row is a pair of transformed
|
216 |
+
coordinates (x, y)
|
217 |
+
reflective: True or False
|
218 |
+
if True:
|
219 |
+
use reflective similarity transform
|
220 |
+
else:
|
221 |
+
use non-reflective similarity transform
|
222 |
+
|
223 |
+
Returns:
|
224 |
+
----------
|
225 |
+
@cv2_trans: 2x3 np.array
|
226 |
+
transform matrix from src_pts to dst_pts, could be directly used
|
227 |
+
for cv2.warpAffine()
|
228 |
+
"""
|
229 |
+
trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective)
|
230 |
+
cv2_trans = cvt_tform_mat_for_cv2(trans)
|
231 |
+
|
232 |
+
return cv2_trans
|
233 |
+
|
234 |
+
|
235 |
+
if __name__ == '__main__':
|
236 |
+
"""
|
237 |
+
u = [0, 6, -2]
|
238 |
+
v = [0, 3, 5]
|
239 |
+
x = [-1, 0, 4]
|
240 |
+
y = [-1, -10, 4]
|
241 |
+
|
242 |
+
# In Matlab, run:
|
243 |
+
#
|
244 |
+
# uv = [u'; v'];
|
245 |
+
# xy = [x'; y'];
|
246 |
+
# tform_sim=cp2tform(uv,xy,'similarity');
|
247 |
+
#
|
248 |
+
# trans = tform_sim.tdata.T
|
249 |
+
# ans =
|
250 |
+
# -0.0764 -1.6190 0
|
251 |
+
# 1.6190 -0.0764 0
|
252 |
+
# -3.2156 0.0290 1.0000
|
253 |
+
# trans_inv = tform_sim.tdata.Tinv
|
254 |
+
# ans =
|
255 |
+
#
|
256 |
+
# -0.0291 0.6163 0
|
257 |
+
# -0.6163 -0.0291 0
|
258 |
+
# -0.0756 1.9826 1.0000
|
259 |
+
# xy_m=tformfwd(tform_sim, u,v)
|
260 |
+
#
|
261 |
+
# xy_m =
|
262 |
+
#
|
263 |
+
# -3.2156 0.0290
|
264 |
+
# 1.1833 -9.9143
|
265 |
+
# 5.0323 2.8853
|
266 |
+
# uv_m=tforminv(tform_sim, x,y)
|
267 |
+
#
|
268 |
+
# uv_m =
|
269 |
+
#
|
270 |
+
# 0.5698 1.3953
|
271 |
+
# 6.0872 2.2733
|
272 |
+
# -2.6570 4.3314
|
273 |
+
"""
|
274 |
+
u = [0, 6, -2]
|
275 |
+
v = [0, 3, 5]
|
276 |
+
x = [-1, 0, 4]
|
277 |
+
y = [-1, -10, 4]
|
278 |
+
|
279 |
+
uv = np.array((u, v)).T
|
280 |
+
xy = np.array((x, y)).T
|
281 |
+
|
282 |
+
print('\n--->uv:')
|
283 |
+
print(uv)
|
284 |
+
print('\n--->xy:')
|
285 |
+
print(xy)
|
286 |
+
|
287 |
+
trans, trans_inv = get_similarity_transform(uv, xy)
|
288 |
+
|
289 |
+
print('\n--->trans matrix:')
|
290 |
+
print(trans)
|
291 |
+
|
292 |
+
print('\n--->trans_inv matrix:')
|
293 |
+
print(trans_inv)
|
294 |
+
|
295 |
+
print('\n---> apply transform to uv')
|
296 |
+
print('\nxy_m = uv_augmented * trans')
|
297 |
+
uv_aug = np.hstack((uv, np.ones((uv.shape[0], 1))))
|
298 |
+
xy_m = np.dot(uv_aug, trans)
|
299 |
+
print(xy_m)
|
300 |
+
|
301 |
+
print('\nxy_m = tformfwd(trans, uv)')
|
302 |
+
xy_m = tformfwd(trans, uv)
|
303 |
+
print(xy_m)
|
304 |
+
|
305 |
+
print('\n---> apply inverse transform to xy')
|
306 |
+
print('\nuv_m = xy_augmented * trans_inv')
|
307 |
+
xy_aug = np.hstack((xy, np.ones((xy.shape[0], 1))))
|
308 |
+
uv_m = np.dot(xy_aug, trans_inv)
|
309 |
+
print(uv_m)
|
310 |
+
|
311 |
+
print('\nuv_m = tformfwd(trans_inv, xy)')
|
312 |
+
uv_m = tformfwd(trans_inv, xy)
|
313 |
+
print(uv_m)
|
314 |
+
|
315 |
+
uv_m = tforminv(trans, xy)
|
316 |
+
print('\nuv_m = tforminv(trans, xy)')
|
317 |
+
print(uv_m)
|
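The __main__ block above only exercises the 3x3 transforms; a small sketch of the 2x3 cv2 path that align_trans.py relies on, using the same toy points (the blank image is a placeholder):

import cv2
import numpy as np

from extras.facexlib.detection.matlab_cp2tform import get_similarity_transform_for_cv2

src_pts = np.array([[0.0, 0.0], [6.0, 3.0], [-2.0, 5.0]])     # Kx2 source points (toy values)
dst_pts = np.array([[-1.0, -1.0], [0.0, -10.0], [4.0, 4.0]])  # Kx2 destination points

cv2_trans = get_similarity_transform_for_cv2(src_pts, dst_pts)  # 2x3 matrix for cv2.warpAffine
img = np.zeros((128, 128, 3), dtype=np.uint8)                   # placeholder image
warped = cv2.warpAffine(img, cv2_trans, (96, 112))              # (width, height) of the output crop
print(warped.shape)  # (112, 96, 3)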
extras/facexlib/detection/retinaface.py
ADDED
@@ -0,0 +1,366 @@
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
import torch
|
4 |
+
import torch.nn as nn
|
5 |
+
import torch.nn.functional as F
|
6 |
+
from PIL import Image
|
7 |
+
from torchvision.models._utils import IntermediateLayerGetter
|
8 |
+
|
9 |
+
from extras.facexlib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
|
10 |
+
from extras.facexlib.detection.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
|
11 |
+
from extras.facexlib.detection.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
|
12 |
+
py_cpu_nms)
|
13 |
+
|
14 |
+
|
15 |
+
def generate_config(network_name):
|
16 |
+
|
17 |
+
cfg_mnet = {
|
18 |
+
'name': 'mobilenet0.25',
|
19 |
+
'min_sizes': [[16, 32], [64, 128], [256, 512]],
|
20 |
+
'steps': [8, 16, 32],
|
21 |
+
'variance': [0.1, 0.2],
|
22 |
+
'clip': False,
|
23 |
+
'loc_weight': 2.0,
|
24 |
+
'gpu_train': True,
|
25 |
+
'batch_size': 32,
|
26 |
+
'ngpu': 1,
|
27 |
+
'epoch': 250,
|
28 |
+
'decay1': 190,
|
29 |
+
'decay2': 220,
|
30 |
+
'image_size': 640,
|
31 |
+
'return_layers': {
|
32 |
+
'stage1': 1,
|
33 |
+
'stage2': 2,
|
34 |
+
'stage3': 3
|
35 |
+
},
|
36 |
+
'in_channel': 32,
|
37 |
+
'out_channel': 64
|
38 |
+
}
|
39 |
+
|
40 |
+
cfg_re50 = {
|
41 |
+
'name': 'Resnet50',
|
42 |
+
'min_sizes': [[16, 32], [64, 128], [256, 512]],
|
43 |
+
'steps': [8, 16, 32],
|
44 |
+
'variance': [0.1, 0.2],
|
45 |
+
'clip': False,
|
46 |
+
'loc_weight': 2.0,
|
47 |
+
'gpu_train': True,
|
48 |
+
'batch_size': 24,
|
49 |
+
'ngpu': 4,
|
50 |
+
'epoch': 100,
|
51 |
+
'decay1': 70,
|
52 |
+
'decay2': 90,
|
53 |
+
'image_size': 840,
|
54 |
+
'return_layers': {
|
55 |
+
'layer2': 1,
|
56 |
+
'layer3': 2,
|
57 |
+
'layer4': 3
|
58 |
+
},
|
59 |
+
'in_channel': 256,
|
60 |
+
'out_channel': 256
|
61 |
+
}
|
62 |
+
|
63 |
+
if network_name == 'mobile0.25':
|
64 |
+
return cfg_mnet
|
65 |
+
elif network_name == 'resnet50':
|
66 |
+
return cfg_re50
|
67 |
+
else:
|
68 |
+
raise NotImplementedError(f'network_name={network_name}')
|
69 |
+
|
70 |
+
|
71 |
+
class RetinaFace(nn.Module):
|
72 |
+
|
73 |
+
def __init__(self, network_name='resnet50', half=False, phase='test', device=None):
|
74 |
+
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
|
75 |
+
|
76 |
+
super(RetinaFace, self).__init__()
|
77 |
+
self.half_inference = half
|
78 |
+
cfg = generate_config(network_name)
|
79 |
+
self.backbone = cfg['name']
|
80 |
+
|
81 |
+
self.model_name = f'retinaface_{network_name}'
|
82 |
+
self.cfg = cfg
|
83 |
+
self.phase = phase
|
84 |
+
self.target_size, self.max_size = 1600, 2150
|
85 |
+
self.resize, self.scale, self.scale1 = 1., None, None
|
86 |
+
self.mean_tensor = torch.tensor([[[[104.]], [[117.]], [[123.]]]], device=self.device)
|
87 |
+
self.reference = get_reference_facial_points(default_square=True)
|
88 |
+
# Build network.
|
89 |
+
backbone = None
|
90 |
+
if cfg['name'] == 'mobilenet0.25':
|
91 |
+
backbone = MobileNetV1()
|
92 |
+
self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
|
93 |
+
elif cfg['name'] == 'Resnet50':
|
94 |
+
import torchvision.models as models
|
95 |
+
backbone = models.resnet50(weights=None)
|
96 |
+
self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
|
97 |
+
|
98 |
+
in_channels_stage2 = cfg['in_channel']
|
99 |
+
in_channels_list = [
|
100 |
+
in_channels_stage2 * 2,
|
101 |
+
in_channels_stage2 * 4,
|
102 |
+
in_channels_stage2 * 8,
|
103 |
+
]
|
104 |
+
|
105 |
+
out_channels = cfg['out_channel']
|
106 |
+
self.fpn = FPN(in_channels_list, out_channels)
|
107 |
+
self.ssh1 = SSH(out_channels, out_channels)
|
108 |
+
self.ssh2 = SSH(out_channels, out_channels)
|
109 |
+
self.ssh3 = SSH(out_channels, out_channels)
|
110 |
+
|
111 |
+
self.ClassHead = make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
|
112 |
+
self.BboxHead = make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
|
113 |
+
self.LandmarkHead = make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
|
114 |
+
|
115 |
+
self.to(self.device)
|
116 |
+
self.eval()
|
117 |
+
if self.half_inference:
|
118 |
+
self.half()
|
119 |
+
|
120 |
+
def forward(self, inputs):
|
121 |
+
out = self.body(inputs)
|
122 |
+
|
123 |
+
if self.backbone == 'mobilenet0.25' or self.backbone == 'Resnet50':
|
124 |
+
out = list(out.values())
|
125 |
+
# FPN
|
126 |
+
fpn = self.fpn(out)
|
127 |
+
|
128 |
+
# SSH
|
129 |
+
feature1 = self.ssh1(fpn[0])
|
130 |
+
feature2 = self.ssh2(fpn[1])
|
131 |
+
feature3 = self.ssh3(fpn[2])
|
132 |
+
features = [feature1, feature2, feature3]
|
133 |
+
|
134 |
+
bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
|
135 |
+
classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
|
136 |
+
tmp = [self.LandmarkHead[i](feature) for i, feature in enumerate(features)]
|
137 |
+
ldm_regressions = (torch.cat(tmp, dim=1))
|
138 |
+
|
139 |
+
if self.phase == 'train':
|
140 |
+
output = (bbox_regressions, classifications, ldm_regressions)
|
141 |
+
else:
|
142 |
+
output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
|
143 |
+
return output
|
144 |
+
|
145 |
+
def __detect_faces(self, inputs):
|
146 |
+
# get scale
|
147 |
+
height, width = inputs.shape[2:]
|
148 |
+
self.scale = torch.tensor([width, height, width, height], dtype=torch.float32, device=self.device)
|
149 |
+
tmp = [width, height, width, height, width, height, width, height, width, height]
|
150 |
+
self.scale1 = torch.tensor(tmp, dtype=torch.float32, device=self.device)
|
151 |
+
|
152 |
+
# forward
|
153 |
+
inputs = inputs.to(self.device)
|
154 |
+
if self.half_inference:
|
155 |
+
inputs = inputs.half()
|
156 |
+
loc, conf, landmarks = self(inputs)
|
157 |
+
|
158 |
+
# get priorbox
|
159 |
+
priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:])
|
160 |
+
priors = priorbox.forward().to(self.device)
|
161 |
+
|
162 |
+
return loc, conf, landmarks, priors
|
163 |
+
|
164 |
+
# single image detection
|
165 |
+
def transform(self, image, use_origin_size):
|
166 |
+
# convert to opencv format
|
167 |
+
if isinstance(image, Image.Image):
|
168 |
+
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
|
169 |
+
image = image.astype(np.float32)
|
170 |
+
|
171 |
+
# testing scale
|
172 |
+
im_size_min = np.min(image.shape[0:2])
|
173 |
+
im_size_max = np.max(image.shape[0:2])
|
174 |
+
resize = float(self.target_size) / float(im_size_min)
|
175 |
+
|
176 |
+
# prevent bigger axis from being more than max_size
|
177 |
+
if np.round(resize * im_size_max) > self.max_size:
|
178 |
+
resize = float(self.max_size) / float(im_size_max)
|
179 |
+
resize = 1 if use_origin_size else resize
|
180 |
+
|
181 |
+
# resize
|
182 |
+
if resize != 1:
|
183 |
+
image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
|
184 |
+
|
185 |
+
# convert to torch.tensor format
|
186 |
+
# image -= (104, 117, 123)
|
187 |
+
image = image.transpose(2, 0, 1)
|
188 |
+
image = torch.from_numpy(image).unsqueeze(0)
|
189 |
+
|
190 |
+
return image, resize
|
191 |
+
|
192 |
+
def detect_faces(
|
193 |
+
self,
|
194 |
+
image,
|
195 |
+
conf_threshold=0.8,
|
196 |
+
nms_threshold=0.4,
|
197 |
+
use_origin_size=True,
|
198 |
+
):
|
199 |
+
image, self.resize = self.transform(image, use_origin_size)
|
200 |
+
image = image.to(self.device)
|
201 |
+
if self.half_inference:
|
202 |
+
image = image.half()
|
203 |
+
image = image - self.mean_tensor
|
204 |
+
|
205 |
+
loc, conf, landmarks, priors = self.__detect_faces(image)
|
206 |
+
|
207 |
+
boxes = decode(loc.data.squeeze(0), priors.data, self.cfg['variance'])
|
208 |
+
boxes = boxes * self.scale / self.resize
|
209 |
+
boxes = boxes.cpu().numpy()
|
210 |
+
|
211 |
+
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
|
212 |
+
|
213 |
+
landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
|
214 |
+
landmarks = landmarks * self.scale1 / self.resize
|
215 |
+
landmarks = landmarks.cpu().numpy()
|
216 |
+
|
217 |
+
# ignore low scores
|
218 |
+
inds = np.where(scores > conf_threshold)[0]
|
219 |
+
boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]
|
220 |
+
|
221 |
+
# sort
|
222 |
+
order = scores.argsort()[::-1]
|
223 |
+
boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]
|
224 |
+
|
225 |
+
# do NMS
|
226 |
+
bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
|
227 |
+
keep = py_cpu_nms(bounding_boxes, nms_threshold)
|
228 |
+
bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
|
229 |
+
# self.t['forward_pass'].toc()
|
230 |
+
# print(self.t['forward_pass'].average_time)
|
231 |
+
# import sys
|
232 |
+
# sys.stdout.flush()
|
233 |
+
return np.concatenate((bounding_boxes, landmarks), axis=1)
|
234 |
+
|
235 |
+
def __align_multi(self, image, boxes, landmarks, limit=None):
|
236 |
+
|
237 |
+
if len(boxes) < 1:
|
238 |
+
return [], []
|
239 |
+
|
240 |
+
if limit:
|
241 |
+
boxes = boxes[:limit]
|
242 |
+
landmarks = landmarks[:limit]
|
243 |
+
|
244 |
+
faces = []
|
245 |
+
for landmark in landmarks:
|
246 |
+
facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]
|
247 |
+
|
248 |
+
warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
|
249 |
+
faces.append(warped_face)
|
250 |
+
|
251 |
+
return np.concatenate((boxes, landmarks), axis=1), faces
|
252 |
+
|
253 |
+
def align_multi(self, img, conf_threshold=0.8, limit=None):
|
254 |
+
|
255 |
+
rlt = self.detect_faces(img, conf_threshold=conf_threshold)
|
256 |
+
boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]
|
257 |
+
|
258 |
+
return self.__align_multi(img, boxes, landmarks, limit)
|
259 |
+
|
260 |
+
# batched detection
|
261 |
+
def batched_transform(self, frames, use_origin_size):
|
262 |
+
"""
|
263 |
+
Arguments:
|
264 |
+
frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
|
265 |
+
type=np.float32, BGR format).
|
266 |
+
use_origin_size: whether to use origin size.
|
267 |
+
"""
|
268 |
+
from_PIL = isinstance(frames[0], Image.Image)
|
269 |
+
|
270 |
+
# convert to opencv format
|
271 |
+
if from_PIL:
|
272 |
+
frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
|
273 |
+
frames = np.asarray(frames, dtype=np.float32)
|
274 |
+
|
275 |
+
# testing scale
|
276 |
+
im_size_min = np.min(frames[0].shape[0:2])
|
277 |
+
im_size_max = np.max(frames[0].shape[0:2])
|
278 |
+
resize = float(self.target_size) / float(im_size_min)
|
279 |
+
|
280 |
+
# prevent bigger axis from being more than max_size
|
281 |
+
if np.round(resize * im_size_max) > self.max_size:
|
282 |
+
resize = float(self.max_size) / float(im_size_max)
|
283 |
+
resize = 1 if use_origin_size else resize
|
284 |
+
|
285 |
+
# resize
|
286 |
+
if resize != 1:
|
287 |
+
if not from_PIL:
|
288 |
+
frames = F.interpolate(frames, scale_factor=resize)
|
289 |
+
else:
|
290 |
+
frames = [
|
291 |
+
cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
|
292 |
+
for frame in frames
|
293 |
+
]
|
294 |
+
|
295 |
+
# convert to torch.tensor format
|
296 |
+
if not from_PIL:
|
297 |
+
frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
|
298 |
+
else:
|
299 |
+
frames = frames.transpose((0, 3, 1, 2))
|
300 |
+
frames = torch.from_numpy(frames)
|
301 |
+
|
302 |
+
return frames, resize
|
303 |
+
|
304 |
+
def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
|
305 |
+
"""
|
306 |
+
Arguments:
|
307 |
+
frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
|
308 |
+
type=np.uint8, BGR format).
|
309 |
+
conf_threshold: confidence threshold.
|
310 |
+
nms_threshold: nms threshold.
|
311 |
+
use_origin_size: whether to use origin size.
|
312 |
+
Returns:
|
313 |
+
final_bounding_boxes: list of np.array ([n_boxes, 5],
|
314 |
+
type=np.float32).
|
315 |
+
final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
|
316 |
+
"""
|
317 |
+
# self.t['forward_pass'].tic()
|
318 |
+
frames, self.resize = self.batched_transform(frames, use_origin_size)
|
319 |
+
frames = frames.to(self.device)
|
320 |
+
frames = frames - self.mean_tensor
|
321 |
+
|
322 |
+
b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)
|
323 |
+
|
324 |
+
final_bounding_boxes, final_landmarks = [], []
|
325 |
+
|
326 |
+
# decode
|
327 |
+
priors = priors.unsqueeze(0)
|
328 |
+
b_loc = batched_decode(b_loc, priors, self.cfg['variance']) * self.scale / self.resize
|
329 |
+
b_landmarks = batched_decode_landm(b_landmarks, priors, self.cfg['variance']) * self.scale1 / self.resize
|
330 |
+
b_conf = b_conf[:, :, 1]
|
331 |
+
|
332 |
+
# index for selection
|
333 |
+
b_indice = b_conf > conf_threshold
|
334 |
+
|
335 |
+
# concat
|
336 |
+
b_loc_and_conf = torch.cat((b_loc, b_conf.unsqueeze(-1)), dim=2).float()
|
337 |
+
|
338 |
+
for pred, landm, inds in zip(b_loc_and_conf, b_landmarks, b_indice):
|
339 |
+
|
340 |
+
# ignore low scores
|
341 |
+
pred, landm = pred[inds, :], landm[inds, :]
|
342 |
+
if pred.shape[0] == 0:
|
343 |
+
final_bounding_boxes.append(np.array([], dtype=np.float32))
|
344 |
+
final_landmarks.append(np.array([], dtype=np.float32))
|
345 |
+
continue
|
346 |
+
|
347 |
+
# sort
|
348 |
+
# order = score.argsort(descending=True)
|
349 |
+
# box, landm, score = box[order], landm[order], score[order]
|
350 |
+
|
351 |
+
# to CPU
|
352 |
+
bounding_boxes, landm = pred.cpu().numpy(), landm.cpu().numpy()
|
353 |
+
|
354 |
+
# NMS
|
355 |
+
keep = py_cpu_nms(bounding_boxes, nms_threshold)
|
356 |
+
bounding_boxes, landmarks = bounding_boxes[keep, :], landm[keep]
|
357 |
+
|
358 |
+
# append
|
359 |
+
final_bounding_boxes.append(bounding_boxes)
|
360 |
+
final_landmarks.append(landmarks)
|
361 |
+
# self.t['forward_pass'].toc(average=True)
|
362 |
+
# self.batch_time += self.t['forward_pass'].diff
|
363 |
+
# self.total_frame += len(frames)
|
364 |
+
# print(self.batch_time / self.total_frame)
|
365 |
+
|
366 |
+
return final_bounding_boxes, final_landmarks
|
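A minimal usage sketch for the detector defined in this file; weight loading is handled elsewhere in the package and is omitted here, so the model stays randomly initialised and the input is synthetic, illustrating only the calling convention and output layout:

import numpy as np
import torch

from extras.facexlib.detection.retinaface import RetinaFace

detector = RetinaFace(network_name='resnet50', half=False, device=torch.device('cpu'))
img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # placeholder BGR image

with torch.no_grad():
    dets = detector.detect_faces(img, conf_threshold=0.8)  # ndarray [n, 15]: box(4) + score(1) + landmarks(10)
boxes, landmarks = dets[:, 0:5], dets[:, 5:]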
extras/facexlib/detection/retinaface_net.py
ADDED
@@ -0,0 +1,196 @@
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import torch.nn.functional as F
|
4 |
+
|
5 |
+
|
6 |
+
def conv_bn(inp, oup, stride=1, leaky=0):
|
7 |
+
return nn.Sequential(
|
8 |
+
nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.BatchNorm2d(oup),
|
9 |
+
nn.LeakyReLU(negative_slope=leaky, inplace=True))
|
10 |
+
|
11 |
+
|
12 |
+
def conv_bn_no_relu(inp, oup, stride):
|
13 |
+
return nn.Sequential(
|
14 |
+
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
|
15 |
+
nn.BatchNorm2d(oup),
|
16 |
+
)
|
17 |
+
|
18 |
+
|
19 |
+
def conv_bn1X1(inp, oup, stride, leaky=0):
|
20 |
+
return nn.Sequential(
|
21 |
+
nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False), nn.BatchNorm2d(oup),
|
22 |
+
nn.LeakyReLU(negative_slope=leaky, inplace=True))
|
23 |
+
|
24 |
+
|
25 |
+
def conv_dw(inp, oup, stride, leaky=0.1):
|
26 |
+
return nn.Sequential(
|
27 |
+
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
|
28 |
+
nn.BatchNorm2d(inp),
|
29 |
+
nn.LeakyReLU(negative_slope=leaky, inplace=True),
|
30 |
+
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
|
31 |
+
nn.BatchNorm2d(oup),
|
32 |
+
nn.LeakyReLU(negative_slope=leaky, inplace=True),
|
33 |
+
)
|
34 |
+
|
35 |
+
|
36 |
+
class SSH(nn.Module):
|
37 |
+
|
38 |
+
def __init__(self, in_channel, out_channel):
|
39 |
+
super(SSH, self).__init__()
|
40 |
+
assert out_channel % 4 == 0
|
41 |
+
leaky = 0
|
42 |
+
if (out_channel <= 64):
|
43 |
+
leaky = 0.1
|
44 |
+
self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)
|
45 |
+
|
46 |
+
self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)
|
47 |
+
self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)
|
48 |
+
|
49 |
+
self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky)
|
50 |
+
self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)
|
51 |
+
|
52 |
+
def forward(self, input):
|
53 |
+
conv3X3 = self.conv3X3(input)
|
54 |
+
|
55 |
+
conv5X5_1 = self.conv5X5_1(input)
|
56 |
+
conv5X5 = self.conv5X5_2(conv5X5_1)
|
57 |
+
|
58 |
+
conv7X7_2 = self.conv7X7_2(conv5X5_1)
|
59 |
+
conv7X7 = self.conv7x7_3(conv7X7_2)
|
60 |
+
|
61 |
+
out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
|
62 |
+
out = F.relu(out)
|
63 |
+
return out
|
64 |
+
|
65 |
+
|
66 |
+
class FPN(nn.Module):
|
67 |
+
|
68 |
+
def __init__(self, in_channels_list, out_channels):
|
69 |
+
super(FPN, self).__init__()
|
70 |
+
leaky = 0
|
71 |
+
if (out_channels <= 64):
|
72 |
+
leaky = 0.1
|
73 |
+
self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
|
74 |
+
self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
|
75 |
+
self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)
|
76 |
+
|
77 |
+
self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
|
78 |
+
self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)
|
79 |
+
|
80 |
+
def forward(self, input):
|
81 |
+
# names = list(input.keys())
|
82 |
+
# input = list(input.values())
|
83 |
+
|
84 |
+
output1 = self.output1(input[0])
|
85 |
+
output2 = self.output2(input[1])
|
86 |
+
output3 = self.output3(input[2])
|
87 |
+
|
88 |
+
up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest')
|
89 |
+
output2 = output2 + up3
|
90 |
+
output2 = self.merge2(output2)
|
91 |
+
|
92 |
+
up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest')
|
93 |
+
output1 = output1 + up2
|
94 |
+
output1 = self.merge1(output1)
|
95 |
+
|
96 |
+
out = [output1, output2, output3]
|
97 |
+
return out
|
98 |
+
|
99 |
+
|
100 |
+
class MobileNetV1(nn.Module):
|
101 |
+
|
102 |
+
def __init__(self):
|
103 |
+
super(MobileNetV1, self).__init__()
|
104 |
+
self.stage1 = nn.Sequential(
|
105 |
+
conv_bn(3, 8, 2, leaky=0.1), # 3
|
106 |
+
conv_dw(8, 16, 1), # 7
|
107 |
+
conv_dw(16, 32, 2), # 11
|
108 |
+
conv_dw(32, 32, 1), # 19
|
109 |
+
conv_dw(32, 64, 2), # 27
|
110 |
+
conv_dw(64, 64, 1), # 43
|
111 |
+
)
|
112 |
+
self.stage2 = nn.Sequential(
|
113 |
+
conv_dw(64, 128, 2), # 43 + 16 = 59
|
114 |
+
conv_dw(128, 128, 1), # 59 + 32 = 91
|
115 |
+
conv_dw(128, 128, 1), # 91 + 32 = 123
|
116 |
+
conv_dw(128, 128, 1), # 123 + 32 = 155
|
117 |
+
conv_dw(128, 128, 1), # 155 + 32 = 187
|
118 |
+
conv_dw(128, 128, 1), # 187 + 32 = 219
|
119 |
+
)
|
120 |
+
self.stage3 = nn.Sequential(
|
121 |
+
conv_dw(128, 256, 2), # 219 +3 2 = 241
|
122 |
+
conv_dw(256, 256, 1), # 241 + 64 = 301
|
123 |
+
)
|
124 |
+
self.avg = nn.AdaptiveAvgPool2d((1, 1))
|
125 |
+
self.fc = nn.Linear(256, 1000)
|
126 |
+
|
127 |
+
def forward(self, x):
|
128 |
+
x = self.stage1(x)
|
129 |
+
x = self.stage2(x)
|
130 |
+
x = self.stage3(x)
|
131 |
+
x = self.avg(x)
|
132 |
+
# x = self.model(x)
|
133 |
+
x = x.view(-1, 256)
|
134 |
+
x = self.fc(x)
|
135 |
+
return x
|
136 |
+
|
137 |
+
|
138 |
+
class ClassHead(nn.Module):
|
139 |
+
|
140 |
+
def __init__(self, inchannels=512, num_anchors=3):
|
141 |
+
super(ClassHead, self).__init__()
|
142 |
+
self.num_anchors = num_anchors
|
143 |
+
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0)
|
144 |
+
|
145 |
+
def forward(self, x):
|
146 |
+
out = self.conv1x1(x)
|
147 |
+
out = out.permute(0, 2, 3, 1).contiguous()
|
148 |
+
|
149 |
+
return out.view(out.shape[0], -1, 2)
|
150 |
+
|
151 |
+
|
152 |
+
class BboxHead(nn.Module):
|
153 |
+
|
154 |
+
def __init__(self, inchannels=512, num_anchors=3):
|
155 |
+
super(BboxHead, self).__init__()
|
156 |
+
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0)
|
157 |
+
|
158 |
+
def forward(self, x):
|
159 |
+
out = self.conv1x1(x)
|
160 |
+
out = out.permute(0, 2, 3, 1).contiguous()
|
161 |
+
|
162 |
+
return out.view(out.shape[0], -1, 4)
|
163 |
+
|
164 |
+
|
165 |
+
class LandmarkHead(nn.Module):
|
166 |
+
|
167 |
+
def __init__(self, inchannels=512, num_anchors=3):
|
168 |
+
super(LandmarkHead, self).__init__()
|
169 |
+
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)
|
170 |
+
|
171 |
+
def forward(self, x):
|
172 |
+
out = self.conv1x1(x)
|
173 |
+
out = out.permute(0, 2, 3, 1).contiguous()
|
174 |
+
|
175 |
+
return out.view(out.shape[0], -1, 10)
|
176 |
+
|
177 |
+
|
178 |
+
def make_class_head(fpn_num=3, inchannels=64, anchor_num=2):
|
179 |
+
classhead = nn.ModuleList()
|
180 |
+
for i in range(fpn_num):
|
181 |
+
classhead.append(ClassHead(inchannels, anchor_num))
|
182 |
+
return classhead
|
183 |
+
|
184 |
+
|
185 |
+
def make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2):
|
186 |
+
bboxhead = nn.ModuleList()
|
187 |
+
for i in range(fpn_num):
|
188 |
+
bboxhead.append(BboxHead(inchannels, anchor_num))
|
189 |
+
return bboxhead
|
190 |
+
|
191 |
+
|
192 |
+
def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2):
|
193 |
+
landmarkhead = nn.ModuleList()
|
194 |
+
for i in range(fpn_num):
|
195 |
+
landmarkhead.append(LandmarkHead(inchannels, anchor_num))
|
196 |
+
return landmarkhead
|
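The three head modules above only reshape the channel dimension into per-anchor predictions; a quick sketch of the expected shapes for one 80x80 FPN level (values chosen for a 640x640 input at stride 8, with the default anchor_num=2):

import torch

from extras.facexlib.detection.retinaface_net import make_bbox_head, make_class_head, make_landmark_head

feat = torch.randn(1, 64, 80, 80)  # one FPN level: 80*80*2 = 12800 anchors
bbox_head = make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2)
class_head = make_class_head(fpn_num=3, inchannels=64, anchor_num=2)
landm_head = make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2)

print(bbox_head[0](feat).shape)   # torch.Size([1, 12800, 4])
print(class_head[0](feat).shape)  # torch.Size([1, 12800, 2])
print(landm_head[0](feat).shape)  # torch.Size([1, 12800, 10])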
extras/facexlib/detection/retinaface_utils.py
ADDED
@@ -0,0 +1,421 @@
1 |
+
import numpy as np
|
2 |
+
import torch
|
3 |
+
import torchvision
|
4 |
+
from itertools import product as product
|
5 |
+
from math import ceil
|
6 |
+
|
7 |
+
|
8 |
+
class PriorBox(object):
|
9 |
+
|
10 |
+
def __init__(self, cfg, image_size=None, phase='train'):
|
11 |
+
super(PriorBox, self).__init__()
|
12 |
+
self.min_sizes = cfg['min_sizes']
|
13 |
+
self.steps = cfg['steps']
|
14 |
+
self.clip = cfg['clip']
|
15 |
+
self.image_size = image_size
|
16 |
+
self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]
|
17 |
+
self.name = 's'
|
18 |
+
|
19 |
+
def forward(self):
|
20 |
+
anchors = []
|
21 |
+
for k, f in enumerate(self.feature_maps):
|
22 |
+
min_sizes = self.min_sizes[k]
|
23 |
+
for i, j in product(range(f[0]), range(f[1])):
|
24 |
+
for min_size in min_sizes:
|
25 |
+
s_kx = min_size / self.image_size[1]
|
26 |
+
s_ky = min_size / self.image_size[0]
|
27 |
+
dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
|
28 |
+
dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
|
29 |
+
for cy, cx in product(dense_cy, dense_cx):
|
30 |
+
anchors += [cx, cy, s_kx, s_ky]
|
31 |
+
|
32 |
+
# back to torch land
|
33 |
+
output = torch.Tensor(anchors).view(-1, 4)
|
34 |
+
if self.clip:
|
35 |
+
output.clamp_(max=1, min=0)
|
36 |
+
return output
|
37 |
+
|
38 |
+
|
39 |
+
def py_cpu_nms(dets, thresh):
|
40 |
+
"""Pure Python NMS baseline."""
|
41 |
+
keep = torchvision.ops.nms(
|
42 |
+
boxes=torch.Tensor(dets[:, :4]),
|
43 |
+
scores=torch.Tensor(dets[:, 4]),
|
44 |
+
iou_threshold=thresh,
|
45 |
+
)
|
46 |
+
|
47 |
+
return list(keep)
|
48 |
+
|
49 |
+
|
50 |
+
def point_form(boxes):
|
51 |
+
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
|
52 |
+
representation for comparison to point form ground truth data.
|
53 |
+
Args:
|
54 |
+
boxes: (tensor) center-size default boxes from priorbox layers.
|
55 |
+
Return:
|
56 |
+
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
|
57 |
+
"""
|
58 |
+
return torch.cat(
|
59 |
+
(
|
60 |
+
boxes[:, :2] - boxes[:, 2:] / 2, # xmin, ymin
|
61 |
+
boxes[:, :2] + boxes[:, 2:] / 2),
|
62 |
+
1) # xmax, ymax
|
63 |
+
|
64 |
+
|
65 |
+
def center_size(boxes):
|
66 |
+
""" Convert prior_boxes to (cx, cy, w, h)
|
67 |
+
representation for comparison to center-size form ground truth data.
|
68 |
+
Args:
|
69 |
+
boxes: (tensor) point_form boxes
|
70 |
+
Return:
|
71 |
+
boxes: (tensor) Converted (cx, cy, w, h) form of boxes.
|
72 |
+
"""
|
73 |
+
return torch.cat(((boxes[:, 2:] + boxes[:, :2]) / 2,  # cx, cy
boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
|
77 |
+
|
78 |
+
|
79 |
+
def intersect(box_a, box_b):
|
80 |
+
""" We resize both tensors to [A,B,2] without new malloc:
|
81 |
+
[A,2] -> [A,1,2] -> [A,B,2]
|
82 |
+
[B,2] -> [1,B,2] -> [A,B,2]
|
83 |
+
Then we compute the area of intersect between box_a and box_b.
|
84 |
+
Args:
|
85 |
+
box_a: (tensor) bounding boxes, Shape: [A,4].
|
86 |
+
box_b: (tensor) bounding boxes, Shape: [B,4].
|
87 |
+
Return:
|
88 |
+
(tensor) intersection area, Shape: [A,B].
|
89 |
+
"""
|
90 |
+
A = box_a.size(0)
|
91 |
+
B = box_b.size(0)
|
92 |
+
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
|
93 |
+
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2))
|
94 |
+
inter = torch.clamp((max_xy - min_xy), min=0)
|
95 |
+
return inter[:, :, 0] * inter[:, :, 1]
|
96 |
+
|
97 |
+
|
98 |
+
def jaccard(box_a, box_b):
|
99 |
+
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
|
100 |
+
is simply the intersection over union of two boxes. Here we operate on
|
101 |
+
ground truth boxes and default boxes.
|
102 |
+
E.g.:
|
103 |
+
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
|
104 |
+
Args:
|
105 |
+
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
|
106 |
+
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
|
107 |
+
Return:
|
108 |
+
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
|
109 |
+
"""
|
110 |
+
inter = intersect(box_a, box_b)
|
111 |
+
area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
|
112 |
+
area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
|
113 |
+
union = area_a + area_b - inter
|
114 |
+
return inter / union # [A,B]
|
115 |
+
|
116 |
+
|
117 |
+
def matrix_iou(a, b):
|
118 |
+
"""
|
119 |
+
return iou of a and b, numpy version for data augmentation
|
120 |
+
"""
|
121 |
+
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
|
122 |
+
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
|
123 |
+
|
124 |
+
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
|
125 |
+
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
|
126 |
+
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
|
127 |
+
return area_i / (area_a[:, np.newaxis] + area_b - area_i)
|
128 |
+
|
129 |
+
|
130 |
+
def matrix_iof(a, b):
|
131 |
+
"""
|
132 |
+
return iof of a and b, numpy version for data augmentation
|
133 |
+
"""
|
134 |
+
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
|
135 |
+
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
|
136 |
+
|
137 |
+
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
|
138 |
+
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
|
139 |
+
return area_i / np.maximum(area_a[:, np.newaxis], 1)
|
140 |
+
|
141 |
+
|
142 |
+
def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
|
143 |
+
"""Match each prior box with the ground truth box of the highest jaccard
|
144 |
+
overlap, encode the bounding boxes, then return the matched indices
|
145 |
+
corresponding to both confidence and location preds.
|
146 |
+
Args:
|
147 |
+
threshold: (float) The overlap threshold used when matching boxes.
|
148 |
+
truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
|
149 |
+
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
|
150 |
+
variances: (tensor) Variances corresponding to each prior coord,
|
151 |
+
Shape: [num_priors, 4].
|
152 |
+
labels: (tensor) All the class labels for the image, Shape: [num_obj].
|
153 |
+
landms: (tensor) Ground truth landms, Shape [num_obj, 10].
|
154 |
+
loc_t: (tensor) Tensor to be filled w/ encoded location targets.
|
155 |
+
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
|
156 |
+
landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
|
157 |
+
idx: (int) current batch index
|
158 |
+
Return:
|
159 |
+
The matched indices corresponding to 1)location 2)confidence
|
160 |
+
3)landm preds.
|
161 |
+
"""
|
162 |
+
# jaccard index
|
163 |
+
overlaps = jaccard(truths, point_form(priors))
|
164 |
+
# (Bipartite Matching)
|
165 |
+
# [1,num_objects] best prior for each ground truth
|
166 |
+
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
|
167 |
+
|
168 |
+
# ignore hard gt
|
169 |
+
valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
|
170 |
+
best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
|
171 |
+
if best_prior_idx_filter.shape[0] <= 0:
|
172 |
+
loc_t[idx] = 0
|
173 |
+
conf_t[idx] = 0
|
174 |
+
return
|
175 |
+
|
176 |
+
# [1,num_priors] best ground truth for each prior
|
177 |
+
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
|
178 |
+
best_truth_idx.squeeze_(0)
|
179 |
+
best_truth_overlap.squeeze_(0)
|
180 |
+
best_prior_idx.squeeze_(1)
|
181 |
+
best_prior_idx_filter.squeeze_(1)
|
182 |
+
best_prior_overlap.squeeze_(1)
|
183 |
+
best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2) # ensure best prior
|
184 |
+
# TODO refactor: index best_prior_idx with long tensor
|
185 |
+
# ensure every gt matches with its prior of max overlap
|
186 |
+
for j in range(best_prior_idx.size(0)): # determine which gt box this anchor predicts
|
187 |
+
best_truth_idx[best_prior_idx[j]] = j
|
188 |
+
matches = truths[best_truth_idx] # Shape: [num_priors,4] gather the gt bbox matched to each anchor
|
189 |
+
conf = labels[best_truth_idx] # Shape: [num_priors] gather the gt label matched to each anchor
|
190 |
+
conf[best_truth_overlap < threshold] = 0 # label as background: anchors with overlap < 0.35 are all treated as negatives
|
191 |
+
loc = encode(matches, priors, variances)
|
192 |
+
|
193 |
+
matches_landm = landms[best_truth_idx]
|
194 |
+
landm = encode_landm(matches_landm, priors, variances)
|
195 |
+
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
|
196 |
+
conf_t[idx] = conf # [num_priors] top class label for each prior
|
197 |
+
landm_t[idx] = landm
|
198 |
+
|
199 |
+
|
200 |
+
def encode(matched, priors, variances):
|
201 |
+
"""Encode the variances from the priorbox layers into the ground truth boxes
|
202 |
+
we have matched (based on jaccard overlap) with the prior boxes.
|
203 |
+
Args:
|
204 |
+
matched: (tensor) Coords of ground truth for each prior in point-form
|
205 |
+
Shape: [num_priors, 4].
|
206 |
+
priors: (tensor) Prior boxes in center-offset form
|
207 |
+
Shape: [num_priors,4].
|
208 |
+
variances: (list[float]) Variances of priorboxes
|
209 |
+
Return:
|
210 |
+
encoded boxes (tensor), Shape: [num_priors, 4]
|
211 |
+
"""
|
212 |
+
|
213 |
+
# dist b/t match center and prior's center
|
214 |
+
g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
|
215 |
+
# encode variance
|
216 |
+
g_cxcy /= (variances[0] * priors[:, 2:])
|
217 |
+
# match wh / prior wh
|
218 |
+
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
|
219 |
+
g_wh = torch.log(g_wh) / variances[1]
|
220 |
+
# return target for smooth_l1_loss
|
221 |
+
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
|
222 |
+
|
223 |
+
|
224 |
+
def encode_landm(matched, priors, variances):
|
225 |
+
"""Encode the variances from the priorbox layers into the ground truth boxes
|
226 |
+
we have matched (based on jaccard overlap) with the prior boxes.
|
227 |
+
Args:
|
228 |
+
matched: (tensor) Coords of ground truth for each prior in point-form
|
229 |
+
Shape: [num_priors, 10].
|
230 |
+
priors: (tensor) Prior boxes in center-offset form
|
231 |
+
Shape: [num_priors,4].
|
232 |
+
variances: (list[float]) Variances of priorboxes
|
233 |
+
Return:
|
234 |
+
encoded landm (tensor), Shape: [num_priors, 10]
|
235 |
+
"""
|
236 |
+
|
237 |
+
# dist b/t match center and prior's center
|
238 |
+
matched = torch.reshape(matched, (matched.size(0), 5, 2))
|
239 |
+
priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
240 |
+
priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
241 |
+
priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
242 |
+
priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
243 |
+
priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
|
244 |
+
g_cxcy = matched[:, :, :2] - priors[:, :, :2]
|
245 |
+
# encode variance
|
246 |
+
g_cxcy /= (variances[0] * priors[:, :, 2:])
|
247 |
+
# g_cxcy /= priors[:, :, 2:]
|
248 |
+
g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
|
249 |
+
# return target for smooth_l1_loss
|
250 |
+
return g_cxcy
|
251 |
+
|
252 |
+
|
253 |
+
# Adapted from https://github.com/Hakuyume/chainer-ssd
|
254 |
+
def decode(loc, priors, variances):
|
255 |
+
"""Decode locations from predictions using priors to undo
|
256 |
+
the encoding we did for offset regression at train time.
|
257 |
+
Args:
|
258 |
+
loc (tensor): location predictions for loc layers,
|
259 |
+
Shape: [num_priors,4]
|
260 |
+
priors (tensor): Prior boxes in center-offset form.
|
261 |
+
Shape: [num_priors,4].
|
262 |
+
variances: (list[float]) Variances of priorboxes
|
263 |
+
Return:
|
264 |
+
decoded bounding box predictions
|
265 |
+
"""
|
266 |
+
|
267 |
+
boxes = torch.cat((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
|
268 |
+
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
|
269 |
+
boxes[:, :2] -= boxes[:, 2:] / 2
|
270 |
+
boxes[:, 2:] += boxes[:, :2]
|
271 |
+
return boxes
|
272 |
+
|
273 |
+
|
274 |
+
def decode_landm(pre, priors, variances):
|
275 |
+
"""Decode landm from predictions using priors to undo
|
276 |
+
the encoding we did for offset regression at train time.
|
277 |
+
Args:
|
278 |
+
pre (tensor): landm predictions for loc layers,
|
279 |
+
Shape: [num_priors,10]
|
280 |
+
priors (tensor): Prior boxes in center-offset form.
|
281 |
+
Shape: [num_priors,4].
|
282 |
+
variances: (list[float]) Variances of priorboxes
|
283 |
+
Return:
|
284 |
+
decoded landm predictions
|
285 |
+
"""
|
286 |
+
tmp = (
|
287 |
+
priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
|
288 |
+
priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
|
289 |
+
priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
|
290 |
+
priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
|
291 |
+
priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
|
292 |
+
)
|
293 |
+
landms = torch.cat(tmp, dim=1)
|
294 |
+
return landms
|
295 |
+
|
296 |
+
|
297 |
+
def batched_decode(b_loc, priors, variances):
|
298 |
+
"""Decode locations from predictions using priors to undo
|
299 |
+
the encoding we did for offset regression at train time.
|
300 |
+
Args:
|
301 |
+
b_loc (tensor): location predictions for loc layers,
|
302 |
+
Shape: [num_batches,num_priors,4]
|
303 |
+
priors (tensor): Prior boxes in center-offset form.
|
304 |
+
Shape: [1,num_priors,4].
|
305 |
+
variances: (list[float]) Variances of priorboxes
|
306 |
+
Return:
|
307 |
+
decoded bounding box predictions
|
308 |
+
"""
|
309 |
+
boxes = (
|
310 |
+
priors[:, :, :2] + b_loc[:, :, :2] * variances[0] * priors[:, :, 2:],
|
311 |
+
priors[:, :, 2:] * torch.exp(b_loc[:, :, 2:] * variances[1]),
|
312 |
+
)
|
313 |
+
boxes = torch.cat(boxes, dim=2)
|
314 |
+
|
315 |
+
boxes[:, :, :2] -= boxes[:, :, 2:] / 2
|
316 |
+
boxes[:, :, 2:] += boxes[:, :, :2]
|
317 |
+
return boxes
|
318 |
+
|
319 |
+
|
320 |
+
def batched_decode_landm(pre, priors, variances):
|
321 |
+
"""Decode landm from predictions using priors to undo
|
322 |
+
the encoding we did for offset regression at train time.
|
323 |
+
Args:
|
324 |
+
pre (tensor): landm predictions for loc layers,
|
325 |
+
Shape: [num_batches,num_priors,10]
|
326 |
+
priors (tensor): Prior boxes in center-offset form.
|
327 |
+
Shape: [1,num_priors,4].
|
328 |
+
variances: (list[float]) Variances of priorboxes
|
329 |
+
Return:
|
330 |
+
decoded landm predictions
|
331 |
+
"""
|
332 |
+
landms = (
|
333 |
+
priors[:, :, :2] + pre[:, :, :2] * variances[0] * priors[:, :, 2:],
|
334 |
+
priors[:, :, :2] + pre[:, :, 2:4] * variances[0] * priors[:, :, 2:],
|
335 |
+
priors[:, :, :2] + pre[:, :, 4:6] * variances[0] * priors[:, :, 2:],
|
336 |
+
priors[:, :, :2] + pre[:, :, 6:8] * variances[0] * priors[:, :, 2:],
|
337 |
+
priors[:, :, :2] + pre[:, :, 8:10] * variances[0] * priors[:, :, 2:],
|
338 |
+
)
|
339 |
+
landms = torch.cat(landms, dim=2)
|
340 |
+
return landms
|
341 |
+
|
342 |
+
|
343 |
+
def log_sum_exp(x):
|
344 |
+
"""Utility function for computing log_sum_exp while determining
|
345 |
+
This will be used to determine unaveraged confidence loss across
|
346 |
+
all examples in a batch.
|
347 |
+
Args:
|
348 |
+
x (Variable(tensor)): conf_preds from conf layers
|
349 |
+
"""
|
350 |
+
x_max = x.data.max()
|
351 |
+
return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
|
352 |
+
|
353 |
+
|
354 |
+
# Original author: Francisco Massa:
|
355 |
+
# https://github.com/fmassa/object-detection.torch
|
356 |
+
# Ported to PyTorch by Max deGroot (02/01/2017)
|
357 |
+
def nms(boxes, scores, overlap=0.5, top_k=200):
|
358 |
+
"""Apply non-maximum suppression at test time to avoid detecting too many
|
359 |
+
overlapping bounding boxes for a given object.
|
360 |
+
Args:
|
361 |
+
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
|
362 |
+
scores: (tensor) The class predscores for the img, Shape:[num_priors].
|
363 |
+
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
|
364 |
+
top_k: (int) The Maximum number of box preds to consider.
|
365 |
+
Return:
|
366 |
+
The indices of the kept boxes with respect to num_priors.
|
367 |
+
"""
|
368 |
+
|
369 |
+
keep = torch.Tensor(scores.size(0)).fill_(0).long()
|
370 |
+
if boxes.numel() == 0:
|
371 |
+
return keep
|
372 |
+
x1 = boxes[:, 0]
|
373 |
+
y1 = boxes[:, 1]
|
374 |
+
x2 = boxes[:, 2]
|
375 |
+
y2 = boxes[:, 3]
|
376 |
+
area = torch.mul(x2 - x1, y2 - y1)
|
377 |
+
v, idx = scores.sort(0) # sort in ascending order
|
378 |
+
# I = I[v >= 0.01]
|
379 |
+
idx = idx[-top_k:] # indices of the top-k largest vals
|
380 |
+
xx1 = boxes.new()
|
381 |
+
yy1 = boxes.new()
|
382 |
+
xx2 = boxes.new()
|
383 |
+
yy2 = boxes.new()
|
384 |
+
w = boxes.new()
|
385 |
+
h = boxes.new()
|
386 |
+
|
387 |
+
# keep = torch.Tensor()
|
388 |
+
count = 0
|
389 |
+
while idx.numel() > 0:
|
390 |
+
i = idx[-1] # index of current largest val
|
391 |
+
# keep.append(i)
|
392 |
+
keep[count] = i
|
393 |
+
count += 1
|
394 |
+
if idx.size(0) == 1:
|
395 |
+
break
|
396 |
+
idx = idx[:-1] # remove kept element from view
|
397 |
+
# load bboxes of next highest vals
|
398 |
+
torch.index_select(x1, 0, idx, out=xx1)
|
399 |
+
torch.index_select(y1, 0, idx, out=yy1)
|
400 |
+
torch.index_select(x2, 0, idx, out=xx2)
|
401 |
+
torch.index_select(y2, 0, idx, out=yy2)
|
402 |
+
# store element-wise max with next highest score
|
403 |
+
xx1 = torch.clamp(xx1, min=x1[i])
|
404 |
+
yy1 = torch.clamp(yy1, min=y1[i])
|
405 |
+
xx2 = torch.clamp(xx2, max=x2[i])
|
406 |
+
yy2 = torch.clamp(yy2, max=y2[i])
|
407 |
+
w.resize_as_(xx2)
|
408 |
+
h.resize_as_(yy2)
|
409 |
+
w = xx2 - xx1
|
410 |
+
h = yy2 - yy1
|
411 |
+
# check sizes of xx1 and xx2.. after each iteration
|
412 |
+
w = torch.clamp(w, min=0.0)
|
413 |
+
h = torch.clamp(h, min=0.0)
|
414 |
+
inter = w * h
|
415 |
+
# IoU = i / (area(a) + area(b) - i)
|
416 |
+
rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
|
417 |
+
union = (rem_areas - inter) + area[i]
|
418 |
+
IoU = inter / union # store result in iou
|
419 |
+
# keep only elements with an IoU <= overlap
|
420 |
+
idx = idx[IoU.le(overlap)]
|
421 |
+
return keep, count
|
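A short sketch tying PriorBox and decode together, using the 'mobile0.25' config from retinaface.py; with a 640x640 input the three strides give (80*80 + 40*40 + 20*20) * 2 = 16800 anchors (the zero regressions are dummies):

import torch

from extras.facexlib.detection.retinaface import generate_config
from extras.facexlib.detection.retinaface_utils import PriorBox, decode

cfg = generate_config('mobile0.25')
priors = PriorBox(cfg, image_size=(640, 640)).forward()  # [16800, 4] in center-size form
loc = torch.zeros(priors.shape[0], 4)                    # dummy regression output
boxes = decode(loc, priors, cfg['variance'])             # [16800, 4] as (xmin, ymin, xmax, ymax)
print(priors.shape, boxes.shape)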
extras/facexlib/parsing/__init__.py
ADDED
@@ -0,0 +1,24 @@
1 |
+
import torch
|
2 |
+
|
3 |
+
from extras.facexlib.utils import load_file_from_url
|
4 |
+
from .bisenet import BiSeNet
|
5 |
+
from .parsenet import ParseNet
|
6 |
+
|
7 |
+
|
8 |
+
def init_parsing_model(model_name='bisenet', half=False, device='cuda', model_rootpath=None):
|
9 |
+
if model_name == 'bisenet':
|
10 |
+
model = BiSeNet(num_class=19)
|
11 |
+
model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.0/parsing_bisenet.pth'
|
12 |
+
elif model_name == 'parsenet':
|
13 |
+
model = ParseNet(in_size=512, out_size=512, parsing_ch=19)
|
14 |
+
model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth'
|
15 |
+
else:
|
16 |
+
raise NotImplementedError(f'{model_name} is not implemented.')
|
17 |
+
|
18 |
+
model_path = load_file_from_url(
|
19 |
+
url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
|
20 |
+
load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
|
21 |
+
model.load_state_dict(load_net, strict=True)
|
22 |
+
model.eval()
|
23 |
+
model = model.to(device)
|
24 |
+
return model
|
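A hedged sketch of how this factory is typically used; the [-1, 1] normalisation is an assumption borrowed from facexlib's face-restoration helpers rather than something this module enforces, and the weights are downloaded from the URLs above on first use:

import torch

from extras.facexlib.parsing import init_parsing_model

device = 'cuda' if torch.cuda.is_available() else 'cpu'
parser = init_parsing_model(model_name='parsenet', device=device)

face = torch.rand(1, 3, 512, 512, device=device)  # placeholder input in [0, 1]
face = (face - 0.5) / 0.5                         # assumed normalisation to [-1, 1]
with torch.no_grad():
    out = parser(face)[0]                         # [1, 19, 512, 512] parsing logits
mask = out.argmax(dim=1)                          # per-pixel class indices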
extras/facexlib/parsing/bisenet.py
ADDED
@@ -0,0 +1,140 @@
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import torch.nn.functional as F
|
4 |
+
|
5 |
+
from .resnet import ResNet18
|
6 |
+
|
7 |
+
|
8 |
+
class ConvBNReLU(nn.Module):
|
9 |
+
|
10 |
+
def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1):
|
11 |
+
super(ConvBNReLU, self).__init__()
|
12 |
+
self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=stride, padding=padding, bias=False)
|
13 |
+
self.bn = nn.BatchNorm2d(out_chan)
|
14 |
+
|
15 |
+
def forward(self, x):
|
16 |
+
x = self.conv(x)
|
17 |
+
x = F.relu(self.bn(x))
|
18 |
+
return x
|
19 |
+
|
20 |
+
|
21 |
+
class BiSeNetOutput(nn.Module):
|
22 |
+
|
23 |
+
def __init__(self, in_chan, mid_chan, num_class):
|
24 |
+
super(BiSeNetOutput, self).__init__()
|
25 |
+
self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
|
26 |
+
self.conv_out = nn.Conv2d(mid_chan, num_class, kernel_size=1, bias=False)
|
27 |
+
|
28 |
+
def forward(self, x):
|
29 |
+
feat = self.conv(x)
|
30 |
+
out = self.conv_out(feat)
|
31 |
+
return out, feat
|
32 |
+
|
33 |
+
|
34 |
+
class AttentionRefinementModule(nn.Module):
|
35 |
+
|
36 |
+
def __init__(self, in_chan, out_chan):
|
37 |
+
super(AttentionRefinementModule, self).__init__()
|
38 |
+
self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
|
39 |
+
self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)
|
40 |
+
self.bn_atten = nn.BatchNorm2d(out_chan)
|
41 |
+
self.sigmoid_atten = nn.Sigmoid()
|
42 |
+
|
43 |
+
def forward(self, x):
|
44 |
+
feat = self.conv(x)
|
45 |
+
atten = F.avg_pool2d(feat, feat.size()[2:])
|
46 |
+
atten = self.conv_atten(atten)
|
47 |
+
atten = self.bn_atten(atten)
|
48 |
+
atten = self.sigmoid_atten(atten)
|
49 |
+
out = torch.mul(feat, atten)
|
50 |
+
return out
|
51 |
+
|
52 |
+
|
53 |
+
class ContextPath(nn.Module):
|
54 |
+
|
55 |
+
def __init__(self):
|
56 |
+
super(ContextPath, self).__init__()
|
57 |
+
self.resnet = ResNet18()
|
58 |
+
self.arm16 = AttentionRefinementModule(256, 128)
|
59 |
+
self.arm32 = AttentionRefinementModule(512, 128)
|
60 |
+
self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
|
61 |
+
self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
|
62 |
+
self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)
|
63 |
+
|
64 |
+
def forward(self, x):
|
65 |
+
feat8, feat16, feat32 = self.resnet(x)
|
66 |
+
h8, w8 = feat8.size()[2:]
|
67 |
+
h16, w16 = feat16.size()[2:]
|
68 |
+
h32, w32 = feat32.size()[2:]
|
69 |
+
|
70 |
+
avg = F.avg_pool2d(feat32, feat32.size()[2:])
|
71 |
+
avg = self.conv_avg(avg)
|
72 |
+
avg_up = F.interpolate(avg, (h32, w32), mode='nearest')
|
73 |
+
|
74 |
+
feat32_arm = self.arm32(feat32)
|
75 |
+
feat32_sum = feat32_arm + avg_up
|
76 |
+
feat32_up = F.interpolate(feat32_sum, (h16, w16), mode='nearest')
|
77 |
+
feat32_up = self.conv_head32(feat32_up)
|
78 |
+
|
79 |
+
feat16_arm = self.arm16(feat16)
|
80 |
+
feat16_sum = feat16_arm + feat32_up
|
81 |
+
feat16_up = F.interpolate(feat16_sum, (h8, w8), mode='nearest')
|
82 |
+
feat16_up = self.conv_head16(feat16_up)
|
83 |
+
|
84 |
+
return feat8, feat16_up, feat32_up # x8, x8, x16
|
85 |
+
|
86 |
+
|
87 |
+
class FeatureFusionModule(nn.Module):
|
88 |
+
|
89 |
+
def __init__(self, in_chan, out_chan):
|
90 |
+
super(FeatureFusionModule, self).__init__()
|
91 |
+
self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
|
92 |
+
self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False)
|
93 |
+
self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False)
|
94 |
+
self.relu = nn.ReLU(inplace=True)
|
95 |
+
self.sigmoid = nn.Sigmoid()
|
96 |
+
|
97 |
+
def forward(self, fsp, fcp):
|
98 |
+
fcat = torch.cat([fsp, fcp], dim=1)
|
99 |
+
feat = self.convblk(fcat)
|
100 |
+
atten = F.avg_pool2d(feat, feat.size()[2:])
|
101 |
+
atten = self.conv1(atten)
|
102 |
+
atten = self.relu(atten)
|
103 |
+
atten = self.conv2(atten)
|
104 |
+
atten = self.sigmoid(atten)
|
105 |
+
feat_atten = torch.mul(feat, atten)
|
106 |
+
feat_out = feat_atten + feat
|
107 |
+
return feat_out
|
108 |
+
|
109 |
+
|
110 |
+
class BiSeNet(nn.Module):
|
111 |
+
|
112 |
+
def __init__(self, num_class):
|
113 |
+
super(BiSeNet, self).__init__()
|
114 |
+
self.cp = ContextPath()
|
115 |
+
self.ffm = FeatureFusionModule(256, 256)
|
116 |
+
self.conv_out = BiSeNetOutput(256, 256, num_class)
|
117 |
+
self.conv_out16 = BiSeNetOutput(128, 64, num_class)
|
118 |
+
self.conv_out32 = BiSeNetOutput(128, 64, num_class)
|
119 |
+
|
120 |
+
def forward(self, x, return_feat=False):
|
121 |
+
h, w = x.size()[2:]
|
122 |
+
feat_res8, feat_cp8, feat_cp16 = self.cp(x) # return res3b1 feature
|
123 |
+
feat_sp = feat_res8 # replace spatial path feature with res3b1 feature
|
124 |
+
feat_fuse = self.ffm(feat_sp, feat_cp8)
|
125 |
+
|
126 |
+
out, feat = self.conv_out(feat_fuse)
|
127 |
+
out16, feat16 = self.conv_out16(feat_cp8)
|
128 |
+
out32, feat32 = self.conv_out32(feat_cp16)
|
129 |
+
|
130 |
+
out = F.interpolate(out, (h, w), mode='bilinear', align_corners=True)
|
131 |
+
out16 = F.interpolate(out16, (h, w), mode='bilinear', align_corners=True)
|
132 |
+
out32 = F.interpolate(out32, (h, w), mode='bilinear', align_corners=True)
|
133 |
+
|
134 |
+
if return_feat:
|
135 |
+
feat = F.interpolate(feat, (h, w), mode='bilinear', align_corners=True)
|
136 |
+
feat16 = F.interpolate(feat16, (h, w), mode='bilinear', align_corners=True)
|
137 |
+
feat32 = F.interpolate(feat32, (h, w), mode='bilinear', align_corners=True)
|
138 |
+
return out, out16, out32, feat, feat16, feat32
|
139 |
+
else:
|
140 |
+
return out, out16, out32
|
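Note: this BiSeNet uses the ResNet18 context path from `resnet.py`, refines the 1/16 and 1/32 features with attention modules, fuses them with the 1/8 feature map, and upsamples all three heads back to the input resolution. A quick shape check, as a sketch with randomly initialized weights (no pretrained checkpoint):

import torch
from extras.facexlib.parsing.bisenet import BiSeNet

net = BiSeNet(num_class=19).eval()
with torch.no_grad():
    out, out16, out32 = net(torch.rand(1, 3, 512, 512))
print(out.shape, out16.shape, out32.shape)  # each torch.Size([1, 19, 512, 512])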
extras/facexlib/parsing/parsenet.py
ADDED
@@ -0,0 +1,194 @@
"""Modified from https://github.com/chaofengc/PSFRGAN
"""
import numpy as np
import torch.nn as nn
from torch.nn import functional as F


class NormLayer(nn.Module):
    """Normalization Layers.

    Args:
        channels: input channels, for batch norm and instance norm.
        input_size: input shape without batch size, for layer norm.
    """

    def __init__(self, channels, normalize_shape=None, norm_type='bn'):
        super(NormLayer, self).__init__()
        norm_type = norm_type.lower()
        self.norm_type = norm_type
        if norm_type == 'bn':
            self.norm = nn.BatchNorm2d(channels, affine=True)
        elif norm_type == 'in':
            self.norm = nn.InstanceNorm2d(channels, affine=False)
        elif norm_type == 'gn':
            self.norm = nn.GroupNorm(32, channels, affine=True)
        elif norm_type == 'pixel':
            self.norm = lambda x: F.normalize(x, p=2, dim=1)
        elif norm_type == 'layer':
            self.norm = nn.LayerNorm(normalize_shape)
        elif norm_type == 'none':
            self.norm = lambda x: x * 1.0
        else:
            assert 1 == 0, f'Norm type {norm_type} not support.'

    def forward(self, x, ref=None):
        if self.norm_type == 'spade':
            return self.norm(x, ref)
        else:
            return self.norm(x)


class ReluLayer(nn.Module):
    """Relu Layer.

    Args:
        relu type: type of relu layer, candidates are
            - ReLU
            - LeakyReLU: default relu slope 0.2
            - PRelu
            - SELU
            - none: direct pass
    """

    def __init__(self, channels, relu_type='relu'):
        super(ReluLayer, self).__init__()
        relu_type = relu_type.lower()
        if relu_type == 'relu':
            self.func = nn.ReLU(True)
        elif relu_type == 'leakyrelu':
            self.func = nn.LeakyReLU(0.2, inplace=True)
        elif relu_type == 'prelu':
            self.func = nn.PReLU(channels)
        elif relu_type == 'selu':
            self.func = nn.SELU(True)
        elif relu_type == 'none':
            self.func = lambda x: x * 1.0
        else:
            assert 1 == 0, f'Relu type {relu_type} not support.'

    def forward(self, x):
        return self.func(x)


class ConvLayer(nn.Module):

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 scale='none',
                 norm_type='none',
                 relu_type='none',
                 use_pad=True,
                 bias=True):
        super(ConvLayer, self).__init__()
        self.use_pad = use_pad
        self.norm_type = norm_type
        if norm_type in ['bn']:
            bias = False

        stride = 2 if scale == 'down' else 1

        self.scale_func = lambda x: x
        if scale == 'up':
            self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest')

        self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) / 2)))
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)

        self.relu = ReluLayer(out_channels, relu_type)
        self.norm = NormLayer(out_channels, norm_type=norm_type)

    def forward(self, x):
        out = self.scale_func(x)
        if self.use_pad:
            out = self.reflection_pad(out)
        out = self.conv2d(out)
        out = self.norm(out)
        out = self.relu(out)
        return out


class ResidualBlock(nn.Module):
    """
    Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
    """

    def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'):
        super(ResidualBlock, self).__init__()

        if scale == 'none' and c_in == c_out:
            self.shortcut_func = lambda x: x
        else:
            self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)

        scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']}
        scale_conf = scale_config_dict[scale]

        self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type)
        self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none')

    def forward(self, x):
        identity = self.shortcut_func(x)

        res = self.conv1(x)
        res = self.conv2(res)
        return identity + res


class ParseNet(nn.Module):

    def __init__(self,
                 in_size=128,
                 out_size=128,
                 min_feat_size=32,
                 base_ch=64,
                 parsing_ch=19,
                 res_depth=10,
                 relu_type='LeakyReLU',
                 norm_type='bn',
                 ch_range=[32, 256]):
        super().__init__()
        self.res_depth = res_depth
        act_args = {'norm_type': norm_type, 'relu_type': relu_type}
        min_ch, max_ch = ch_range

        ch_clip = lambda x: max(min_ch, min(x, max_ch))  # noqa: E731
        min_feat_size = min(in_size, min_feat_size)

        down_steps = int(np.log2(in_size // min_feat_size))
        up_steps = int(np.log2(out_size // min_feat_size))

        # =============== define encoder-body-decoder ====================
        self.encoder = []
        self.encoder.append(ConvLayer(3, base_ch, 3, 1))
        head_ch = base_ch
        for i in range(down_steps):
            cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
            self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args))
            head_ch = head_ch * 2

        self.body = []
        for i in range(res_depth):
            self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args))

        self.decoder = []
        for i in range(up_steps):
            cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
            self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args))
            head_ch = head_ch // 2

        self.encoder = nn.Sequential(*self.encoder)
        self.body = nn.Sequential(*self.body)
        self.decoder = nn.Sequential(*self.decoder)
        self.out_img_conv = ConvLayer(ch_clip(head_ch), 3)
        self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch)

    def forward(self, x):
        feat = self.encoder(x)
        x = feat + self.body(feat)
        x = self.decoder(x)
        out_img = self.out_img_conv(x)
        out_mask = self.out_mask_conv(x)
        return out_mask, out_img
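Note: ParseNet is an encoder-body-decoder network that predicts both a 19-channel parsing map and a reconstructed RGB image. With the constructor arguments used in `parsing/__init__.py` (`in_size=512, out_size=512, parsing_ch=19`), a dummy forward pass looks like this (sketch, randomly initialized weights):

import torch
from extras.facexlib.parsing.parsenet import ParseNet

net = ParseNet(in_size=512, out_size=512, parsing_ch=19).eval()
with torch.no_grad():
    out_mask, out_img = net(torch.rand(1, 3, 512, 512))
print(out_mask.shape)  # torch.Size([1, 19, 512, 512])
print(out_img.shape)   # torch.Size([1, 3, 512, 512])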
extras/facexlib/parsing/resnet.py
ADDED
@@ -0,0 +1,69 @@
import torch.nn as nn
import torch.nn.functional as F


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)


class BasicBlock(nn.Module):

    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        if in_chan != out_chan or stride != 1:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_chan),
            )

    def forward(self, x):
        residual = self.conv1(x)
        residual = F.relu(self.bn1(residual))
        residual = self.conv2(residual)
        residual = self.bn2(residual)

        shortcut = x
        if self.downsample is not None:
            shortcut = self.downsample(x)

        out = shortcut + residual
        out = self.relu(out)
        return out


def create_layer_basic(in_chan, out_chan, bnum, stride=1):
    layers = [BasicBlock(in_chan, out_chan, stride=stride)]
    for i in range(bnum - 1):
        layers.append(BasicBlock(out_chan, out_chan, stride=1))
    return nn.Sequential(*layers)


class ResNet18(nn.Module):

    def __init__(self):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.maxpool(x)

        x = self.layer1(x)
        feat8 = self.layer2(x)  # 1/8
        feat16 = self.layer3(feat8)  # 1/16
        feat32 = self.layer4(feat16)  # 1/32
        return feat8, feat16, feat32
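Note: this trimmed ResNet-18 (no classification head) exposes the 1/8, 1/16 and 1/32 feature maps that `ContextPath` in `bisenet.py` consumes. For a 512x512 input the returned tensors have 128, 256 and 512 channels at spatial sizes 64, 32 and 16, e.g. (sketch, random weights):

import torch
from extras.facexlib.parsing.resnet import ResNet18

with torch.no_grad():
    feat8, feat16, feat32 = ResNet18().eval()(torch.rand(1, 3, 512, 512))
print(feat8.shape, feat16.shape, feat32.shape)
# torch.Size([1, 128, 64, 64]) torch.Size([1, 256, 32, 32]) torch.Size([1, 512, 16, 16])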
extras/facexlib/utils/__init__.py
ADDED
@@ -0,0 +1,7 @@
from .face_utils import align_crop_face_landmarks, compute_increased_bbox, get_valid_bboxes, paste_face_back
from .misc import img2tensor, load_file_from_url, scandir

__all__ = [
    'align_crop_face_landmarks', 'compute_increased_bbox', 'get_valid_bboxes', 'load_file_from_url', 'paste_face_back',
    'img2tensor', 'scandir'
]
extras/facexlib/utils/face_restoration_helper.py
ADDED
@@ -0,0 +1,374 @@
import cv2
import numpy as np
import os
import torch
from torchvision.transforms.functional import normalize

from extras.facexlib.detection import init_detection_model
from extras.facexlib.parsing import init_parsing_model
from extras.facexlib.utils.misc import img2tensor, imwrite


def get_largest_face(det_faces, h, w):

    def get_location(val, length):
        if val < 0:
            return 0
        elif val > length:
            return length
        else:
            return val

    face_areas = []
    for det_face in det_faces:
        left = get_location(det_face[0], w)
        right = get_location(det_face[2], w)
        top = get_location(det_face[1], h)
        bottom = get_location(det_face[3], h)
        face_area = (right - left) * (bottom - top)
        face_areas.append(face_area)
    largest_idx = face_areas.index(max(face_areas))
    return det_faces[largest_idx], largest_idx


def get_center_face(det_faces, h=0, w=0, center=None):
    if center is not None:
        center = np.array(center)
    else:
        center = np.array([w / 2, h / 2])
    center_dist = []
    for det_face in det_faces:
        face_center = np.array([(det_face[0] + det_face[2]) / 2, (det_face[1] + det_face[3]) / 2])
        dist = np.linalg.norm(face_center - center)
        center_dist.append(dist)
    center_idx = center_dist.index(min(center_dist))
    return det_faces[center_idx], center_idx


class FaceRestoreHelper(object):
    """Helper for the face restoration pipeline (base class)."""

    def __init__(self,
                 upscale_factor,
                 face_size=512,
                 crop_ratio=(1, 1),
                 det_model='retinaface_resnet50',
                 save_ext='png',
                 template_3points=False,
                 pad_blur=False,
                 use_parse=False,
                 device=None,
                 model_rootpath=None):
        self.template_3points = template_3points  # improve robustness
        self.upscale_factor = upscale_factor
        # the cropped face ratio based on the square face
        self.crop_ratio = crop_ratio  # (h, w)
        assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ration only supports >=1'
        self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))

        if self.template_3points:
            self.face_template = np.array([[192, 240], [319, 240], [257, 371]])
        else:
            # standard 5 landmarks for FFHQ faces with 512 x 512
            self.face_template = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
                                           [201.26117, 371.41043], [313.08905, 371.15118]])
        self.face_template = self.face_template * (face_size / 512.0)
        if self.crop_ratio[0] > 1:
            self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2
        if self.crop_ratio[1] > 1:
            self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2
        self.save_ext = save_ext
        self.pad_blur = pad_blur
        if self.pad_blur is True:
            self.template_3points = False

        self.all_landmarks_5 = []
        self.det_faces = []
        self.affine_matrices = []
        self.inverse_affine_matrices = []
        self.cropped_faces = []
        self.restored_faces = []
        self.pad_input_imgs = []

        if device is None:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = device

        # init face detection model
        self.face_det = init_detection_model(det_model, half=False, device=self.device, model_rootpath=model_rootpath)

        # init face parsing model
        self.use_parse = use_parse
        self.face_parse = init_parsing_model(model_name='parsenet', device=self.device, model_rootpath=model_rootpath)

    def set_upscale_factor(self, upscale_factor):
        self.upscale_factor = upscale_factor

    def read_image(self, img):
        """img can be image path or cv2 loaded image."""
        # self.input_img is Numpy array, (h, w, c), BGR, uint8, [0, 255]
        if isinstance(img, str):
            img = cv2.imread(img)

        if np.max(img) > 256:  # 16-bit image
            img = img / 65535 * 255
        if len(img.shape) == 2:  # gray image
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[2] == 4:  # RGBA image with alpha channel
            img = img[:, :, 0:3]

        self.input_img = img

    def get_face_landmarks_5(self,
                             only_keep_largest=False,
                             only_center_face=False,
                             resize=None,
                             blur_ratio=0.01,
                             eye_dist_threshold=None):
        if resize is None:
            scale = 1
            input_img = self.input_img
        else:
            h, w = self.input_img.shape[0:2]
            scale = min(h, w) / resize
            h, w = int(h / scale), int(w / scale)
            input_img = cv2.resize(self.input_img, (w, h), interpolation=cv2.INTER_LANCZOS4)

        with torch.no_grad():
            bboxes = self.face_det.detect_faces(input_img, 0.97) * scale
        for bbox in bboxes:
            # remove faces with too small eye distance: side faces or too small faces
            eye_dist = np.linalg.norm([bbox[5] - bbox[7], bbox[6] - bbox[8]])
            if eye_dist_threshold is not None and (eye_dist < eye_dist_threshold):
                continue

            if self.template_3points:
                landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 11, 2)])
            else:
                landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 15, 2)])
            self.all_landmarks_5.append(landmark)
            self.det_faces.append(bbox[0:5])
        if len(self.det_faces) == 0:
            return 0
        if only_keep_largest:
            h, w, _ = self.input_img.shape
            self.det_faces, largest_idx = get_largest_face(self.det_faces, h, w)
            self.all_landmarks_5 = [self.all_landmarks_5[largest_idx]]
        elif only_center_face:
            h, w, _ = self.input_img.shape
            self.det_faces, center_idx = get_center_face(self.det_faces, h, w)
            self.all_landmarks_5 = [self.all_landmarks_5[center_idx]]

        # pad blurry images
        if self.pad_blur:
            self.pad_input_imgs = []
            for landmarks in self.all_landmarks_5:
                # get landmarks
                eye_left = landmarks[0, :]
                eye_right = landmarks[1, :]
                eye_avg = (eye_left + eye_right) * 0.5
                mouth_avg = (landmarks[3, :] + landmarks[4, :]) * 0.5
                eye_to_eye = eye_right - eye_left
                eye_to_mouth = mouth_avg - eye_avg

                # Get the oriented crop rectangle
                # x: half width of the oriented crop rectangle
                x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
                # - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise
                # norm with the hypotenuse: get the direction
                x /= np.hypot(*x)  # get the hypotenuse of a right triangle
                rect_scale = 1.5
                x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
                # y: half height of the oriented crop rectangle
                y = np.flipud(x) * [-1, 1]

                # c: center
                c = eye_avg + eye_to_mouth * 0.1
                # quad: (left_top, left_bottom, right_bottom, right_top)
                quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
                # qsize: side length of the square
                qsize = np.hypot(*x) * 2
                border = max(int(np.rint(qsize * 0.1)), 3)

                # get pad
                # pad: (width_left, height_top, width_right, height_bottom)
                pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
                       int(np.ceil(max(quad[:, 1]))))
                pad = [
                    max(-pad[0] + border, 1),
                    max(-pad[1] + border, 1),
                    max(pad[2] - self.input_img.shape[0] + border, 1),
                    max(pad[3] - self.input_img.shape[1] + border, 1)
                ]

                if max(pad) > 1:
                    # pad image
                    pad_img = np.pad(self.input_img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
                    # modify landmark coords
                    landmarks[:, 0] += pad[0]
                    landmarks[:, 1] += pad[1]
                    # blur pad images
                    h, w, _ = pad_img.shape
                    y, x, _ = np.ogrid[:h, :w, :1]
                    mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
                                                       np.float32(w - 1 - x) / pad[2]),
                                      1.0 - np.minimum(np.float32(y) / pad[1],
                                                       np.float32(h - 1 - y) / pad[3]))
                    blur = int(qsize * blur_ratio)
                    if blur % 2 == 0:
                        blur += 1
                    blur_img = cv2.boxFilter(pad_img, 0, ksize=(blur, blur))
                    # blur_img = cv2.GaussianBlur(pad_img, (blur, blur), 0)

                    pad_img = pad_img.astype('float32')
                    pad_img += (blur_img - pad_img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
                    pad_img += (np.median(pad_img, axis=(0, 1)) - pad_img) * np.clip(mask, 0.0, 1.0)
                    pad_img = np.clip(pad_img, 0, 255)  # float32, [0, 255]
                    self.pad_input_imgs.append(pad_img)
                else:
                    self.pad_input_imgs.append(np.copy(self.input_img))

        return len(self.all_landmarks_5)

    def align_warp_face(self, save_cropped_path=None, border_mode='constant'):
        """Align and warp faces with face template.
        """
        if self.pad_blur:
            assert len(self.pad_input_imgs) == len(
                self.all_landmarks_5), f'Mismatched samples: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}'
        for idx, landmark in enumerate(self.all_landmarks_5):
            # use 5 landmarks to get affine matrix
            # use cv2.LMEDS method for the equivalence to skimage transform
            # ref: https://blog.csdn.net/yichxi/article/details/115827338
            affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
            self.affine_matrices.append(affine_matrix)
            # warp and crop faces
            if border_mode == 'constant':
                border_mode = cv2.BORDER_CONSTANT
            elif border_mode == 'reflect101':
                border_mode = cv2.BORDER_REFLECT101
            elif border_mode == 'reflect':
                border_mode = cv2.BORDER_REFLECT
            if self.pad_blur:
                input_img = self.pad_input_imgs[idx]
            else:
                input_img = self.input_img
            cropped_face = cv2.warpAffine(
                input_img, affine_matrix, self.face_size, borderMode=border_mode, borderValue=(135, 133, 132))  # gray
            self.cropped_faces.append(cropped_face)
            # save the cropped face
            if save_cropped_path is not None:
                path = os.path.splitext(save_cropped_path)[0]
                save_path = f'{path}_{idx:02d}.{self.save_ext}'
                imwrite(cropped_face, save_path)

    def get_inverse_affine(self, save_inverse_affine_path=None):
        """Get inverse affine matrix."""
        for idx, affine_matrix in enumerate(self.affine_matrices):
            inverse_affine = cv2.invertAffineTransform(affine_matrix)
            inverse_affine *= self.upscale_factor
            self.inverse_affine_matrices.append(inverse_affine)
            # save inverse affine matrices
            if save_inverse_affine_path is not None:
                path, _ = os.path.splitext(save_inverse_affine_path)
                save_path = f'{path}_{idx:02d}.pth'
                torch.save(inverse_affine, save_path)

    def add_restored_face(self, face):
        self.restored_faces.append(face)

    def paste_faces_to_input_image(self, save_path=None, upsample_img=None):
        h, w, _ = self.input_img.shape
        h_up, w_up = int(h * self.upscale_factor), int(w * self.upscale_factor)

        if upsample_img is None:
            # simply resize the background
            upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
        else:
            upsample_img = cv2.resize(upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)

        assert len(self.restored_faces) == len(
            self.inverse_affine_matrices), ('length of restored_faces and affine_matrices are different.')
        for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices):
            # Add an offset to inverse affine matrix, for more precise back alignment
            if self.upscale_factor > 1:
                extra_offset = 0.5 * self.upscale_factor
            else:
                extra_offset = 0
            inverse_affine[:, 2] += extra_offset
            inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up))

            if self.use_parse:
                # inference
                face_input = cv2.resize(restored_face, (512, 512), interpolation=cv2.INTER_LINEAR)
                face_input = img2tensor(face_input.astype('float32') / 255., bgr2rgb=True, float32=True)
                normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
                face_input = torch.unsqueeze(face_input, 0).to(self.device)
                with torch.no_grad():
                    out = self.face_parse(face_input)[0]
                    out = out.argmax(dim=1).squeeze().cpu().numpy()

                mask = np.zeros(out.shape)
                MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
                for idx, color in enumerate(MASK_COLORMAP):
                    mask[out == idx] = color
                # blur the mask
                mask = cv2.GaussianBlur(mask, (101, 101), 11)
                mask = cv2.GaussianBlur(mask, (101, 101), 11)
                # remove the black borders
                thres = 10
                mask[:thres, :] = 0
                mask[-thres:, :] = 0
                mask[:, :thres] = 0
                mask[:, -thres:] = 0
                mask = mask / 255.

                mask = cv2.resize(mask, restored_face.shape[:2])
                mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up), flags=3)
                inv_soft_mask = mask[:, :, None]
                pasted_face = inv_restored

            else:  # use square parse maps
                mask = np.ones(self.face_size, dtype=np.float32)
                inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
                # remove the black borders
                inv_mask_erosion = cv2.erode(
                    inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
                pasted_face = inv_mask_erosion[:, :, None] * inv_restored
                total_face_area = np.sum(inv_mask_erosion)  # // 3
                # compute the fusion edge based on the area of face
                w_edge = int(total_face_area**0.5) // 20
                erosion_radius = w_edge * 2
                inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
                blur_size = w_edge * 2
                inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
                if len(upsample_img.shape) == 2:  # upsample_img is gray image
                    upsample_img = upsample_img[:, :, None]
                inv_soft_mask = inv_soft_mask[:, :, None]

            if len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4:  # alpha channel
                alpha = upsample_img[:, :, 3:]
                upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img[:, :, 0:3]
                upsample_img = np.concatenate((upsample_img, alpha), axis=2)
            else:
                upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img

        if np.max(upsample_img) > 256:  # 16-bit image
            upsample_img = upsample_img.astype(np.uint16)
        else:
            upsample_img = upsample_img.astype(np.uint8)
        if save_path is not None:
            path = os.path.splitext(save_path)[0]
            save_path = f'{path}.{self.save_ext}'
            imwrite(upsample_img, save_path)
        return upsample_img

    def clean_all(self):
        self.all_landmarks_5 = []
        self.restored_faces = []
        self.affine_matrices = []
        self.cropped_faces = []
        self.inverse_affine_matrices = []
        self.det_faces = []
        self.pad_input_imgs = []
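Note: taken together, `FaceRestoreHelper` wraps the whole crop-restore-paste cycle: detect faces and 5-point landmarks, warp each face to the FFHQ template, hand the crops to a restoration model, then warp the results back and blend them into the (optionally upsampled) background. A hedged end-to-end sketch; `restore_faces`, `restore` and `out_path` are hypothetical names supplied by the caller, and the detection and parsing weights are downloaded on first use:

from extras.facexlib.utils.face_restoration_helper import FaceRestoreHelper

def restore_faces(img_path, restore, out_path='restored.png'):
    helper = FaceRestoreHelper(upscale_factor=2, face_size=512, use_parse=True, device='cpu')
    helper.read_image(img_path)                       # path or BGR uint8 array
    helper.get_face_landmarks_5(eye_dist_threshold=5) # detect faces + landmarks
    helper.align_warp_face()                          # crop and align to the template
    for cropped_face in helper.cropped_faces:
        helper.add_restored_face(restore(cropped_face))  # caller's model: BGR uint8 in/out
    helper.get_inverse_affine()
    return helper.paste_faces_to_input_image(save_path=out_path)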