akhaliq committed
Commit ddb7173
Parent: b455dc7

update gfpgan demo (#4)


- update gfpgan (a7308990ea5b8c9666e3eccd533bbe287d18005b)

.gitattributes CHANGED
@@ -1,7 +1,6 @@
  *.7z filter=lfs diff=lfs merge=lfs -text
  *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
@@ -10,9 +9,13 @@
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
  *.onnx filter=lfs diff=lfs merge=lfs -text
  *.ot filter=lfs diff=lfs merge=lfs -text
  *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
@@ -21,7 +24,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
  *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
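
Net effect: a few more binary formats (`*.npy`, `*.npz`, `*.pickle`, `*.pkl`, `*.wasm`, `*.zst`) are routed through Git LFS, and the obsolete `*.bin.*` and `*.zstandard` patterns are dropped. As a rough illustration (not part of the commit), the sketch below checks file names against the newly added patterns with plain `fnmatch` globbing, which only approximates real gitattributes matching.

```python
# Illustrative sketch only: approximate the new .gitattributes rules with fnmatch.
# Real gitattributes matching has extra rules (e.g. path-relative patterns), so this
# is just a quick way to see which files the added patterns would send to Git LFS.
from fnmatch import fnmatch

added_lfs_patterns = ["*.npy", "*.npz", "*.pickle", "*.pkl", "*.wasm", "*.zst"]

for name in ["weights.pkl", "landmarks.npy", "archive.zst", "notes.txt"]:
    tracked = any(fnmatch(name, pattern) for pattern in added_lfs_patterns)
    print(f"{name}: {'LFS-tracked' if tracked else 'regular git object'}")
```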
LICENSE DELETED
@@ -1,351 +0,0 @@
1
- Tencent is pleased to support the open source community by making GFPGAN available.
2
-
3
- Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
4
-
5
- GFPGAN is licensed under the Apache License Version 2.0 except for the third-party components listed below.
6
-
7
-
8
- Terms of the Apache License Version 2.0:
9
- ---------------------------------------------
10
- Apache License
11
-
12
- Version 2.0, January 2004
13
-
14
- http://www.apache.org/licenses/
15
-
16
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
17
- 1. Definitions.
18
-
19
- “License” shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
20
-
21
- “Licensor” shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
22
-
23
- “Legal Entity” shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, “control” means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
24
-
25
- “You” (or “Your”) shall mean an individual or Legal Entity exercising permissions granted by this License.
26
-
27
- “Source” form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
28
-
29
- “Object” form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
30
-
31
- “Work” shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
32
-
33
- “Derivative Works” shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
34
-
35
- “Contribution” shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, “submitted” means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as “Not a Contribution.”
36
-
37
- “Contributor” shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
38
-
39
- 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
40
-
41
- 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
42
-
43
- 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
44
-
45
- You must give any other recipients of the Work or Derivative Works a copy of this License; and
46
-
47
- You must cause any modified files to carry prominent notices stating that You changed the files; and
48
-
49
- You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
50
-
51
- If the Work includes a “NOTICE” text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
52
-
53
- You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
54
-
55
- 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
56
-
57
- 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
58
-
59
- 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
60
-
61
- 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
62
-
63
- 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
64
-
65
- END OF TERMS AND CONDITIONS
66
-
67
-
68
-
69
- Other dependencies and licenses:
70
-
71
-
72
- Open Source Software licensed under the Apache 2.0 license and Other Licenses of the Third-Party Components therein:
73
- ---------------------------------------------
74
- 1. basicsr
75
- Copyright 2018-2020 BasicSR Authors
76
-
77
-
78
- This BasicSR project is released under the Apache 2.0 license.
79
-
80
- A copy of Apache 2.0 is included in this file.
81
-
82
- StyleGAN2
83
- The codes are modified from the repository stylegan2-pytorch. Many thanks to the author - Kim Seonghyeon 😊 for translating from the official TensorFlow codes to PyTorch ones. Here is the license of stylegan2-pytorch.
84
- The official repository is https://github.com/NVlabs/stylegan2, and here is the NVIDIA license.
85
- DFDNet
86
- The codes are largely modified from the repository DFDNet. Their license is Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
87
-
88
- Terms of the Nvidia License:
89
- ---------------------------------------------
90
-
91
- 1. Definitions
92
-
93
- "Licensor" means any person or entity that distributes its Work.
94
-
95
- "Software" means the original work of authorship made available under
96
- this License.
97
-
98
- "Work" means the Software and any additions to or derivative works of
99
- the Software that are made available under this License.
100
-
101
- "Nvidia Processors" means any central processing unit (CPU), graphics
102
- processing unit (GPU), field-programmable gate array (FPGA),
103
- application-specific integrated circuit (ASIC) or any combination
104
- thereof designed, made, sold, or provided by Nvidia or its affiliates.
105
-
106
- The terms "reproduce," "reproduction," "derivative works," and
107
- "distribution" have the meaning as provided under U.S. copyright law;
108
- provided, however, that for the purposes of this License, derivative
109
- works shall not include works that remain separable from, or merely
110
- link (or bind by name) to the interfaces of, the Work.
111
-
112
- Works, including the Software, are "made available" under this License
113
- by including in or with the Work either (a) a copyright notice
114
- referencing the applicability of this License to the Work, or (b) a
115
- copy of this License.
116
-
117
- 2. License Grants
118
-
119
- 2.1 Copyright Grant. Subject to the terms and conditions of this
120
- License, each Licensor grants to you a perpetual, worldwide,
121
- non-exclusive, royalty-free, copyright license to reproduce,
122
- prepare derivative works of, publicly display, publicly perform,
123
- sublicense and distribute its Work and any resulting derivative
124
- works in any form.
125
-
126
- 3. Limitations
127
-
128
- 3.1 Redistribution. You may reproduce or distribute the Work only
129
- if (a) you do so under this License, (b) you include a complete
130
- copy of this License with your distribution, and (c) you retain
131
- without modification any copyright, patent, trademark, or
132
- attribution notices that are present in the Work.
133
-
134
- 3.2 Derivative Works. You may specify that additional or different
135
- terms apply to the use, reproduction, and distribution of your
136
- derivative works of the Work ("Your Terms") only if (a) Your Terms
137
- provide that the use limitation in Section 3.3 applies to your
138
- derivative works, and (b) you identify the specific derivative
139
- works that are subject to Your Terms. Notwithstanding Your Terms,
140
- this License (including the redistribution requirements in Section
141
- 3.1) will continue to apply to the Work itself.
142
-
143
- 3.3 Use Limitation. The Work and any derivative works thereof only
144
- may be used or intended for use non-commercially. The Work or
145
- derivative works thereof may be used or intended for use by Nvidia
146
- or its affiliates commercially or non-commercially. As used herein,
147
- "non-commercially" means for research or evaluation purposes only.
148
-
149
- 3.4 Patent Claims. If you bring or threaten to bring a patent claim
150
- against any Licensor (including any claim, cross-claim or
151
- counterclaim in a lawsuit) to enforce any patents that you allege
152
- are infringed by any Work, then your rights under this License from
153
- such Licensor (including the grants in Sections 2.1 and 2.2) will
154
- terminate immediately.
155
-
156
- 3.5 Trademarks. This License does not grant any rights to use any
157
- Licensor's or its affiliates' names, logos, or trademarks, except
158
- as necessary to reproduce the notices described in this License.
159
-
160
- 3.6 Termination. If you violate any term of this License, then your
161
- rights under this License (including the grants in Sections 2.1 and
162
- 2.2) will terminate immediately.
163
-
164
- 4. Disclaimer of Warranty.
165
-
166
- THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
167
- KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
168
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
169
- NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
170
- THIS LICENSE.
171
-
172
- 5. Limitation of Liability.
173
-
174
- EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
175
- THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
176
- SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
177
- INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
178
- OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
179
- (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
180
- LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
181
- COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
182
- THE POSSIBILITY OF SUCH DAMAGES.
183
-
184
- MIT License
185
-
186
- Copyright (c) 2019 Kim Seonghyeon
187
-
188
- Permission is hereby granted, free of charge, to any person obtaining a copy
189
- of this software and associated documentation files (the "Software"), to deal
190
- in the Software without restriction, including without limitation the rights
191
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
192
- copies of the Software, and to permit persons to whom the Software is
193
- furnished to do so, subject to the following conditions:
194
-
195
- The above copyright notice and this permission notice shall be included in all
196
- copies or substantial portions of the Software.
197
-
198
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
199
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
200
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
201
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
202
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
203
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
204
- SOFTWARE.
205
-
206
-
207
-
208
- Open Source Software licensed under the BSD 3-Clause license:
209
- ---------------------------------------------
210
- 1. torchvision
211
- Copyright (c) Soumith Chintala 2016,
212
- All rights reserved.
213
-
214
- 2. torch
215
- Copyright (c) 2016- Facebook, Inc (Adam Paszke)
216
- Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
217
- Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
218
- Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
219
- Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
220
- Copyright (c) 2011-2013 NYU (Clement Farabet)
221
- Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
222
- Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
223
- Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
224
-
225
-
226
- Terms of the BSD 3-Clause License:
227
- ---------------------------------------------
228
- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
229
-
230
- 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
231
-
232
- 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
233
-
234
- 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
235
-
236
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
237
-
238
-
239
-
240
- Open Source Software licensed under the BSD 3-Clause License and Other Licenses of the Third-Party Components therein:
241
- ---------------------------------------------
242
- 1. numpy
243
- Copyright (c) 2005-2020, NumPy Developers.
244
- All rights reserved.
245
-
246
- A copy of BSD 3-Clause License is included in this file.
247
-
248
- The NumPy repository and source distributions bundle several libraries that are
249
- compatibly licensed. We list these here.
250
-
251
- Name: Numpydoc
252
- Files: doc/sphinxext/numpydoc/*
253
- License: BSD-2-Clause
254
- For details, see doc/sphinxext/LICENSE.txt
255
-
256
- Name: scipy-sphinx-theme
257
- Files: doc/scipy-sphinx-theme/*
258
- License: BSD-3-Clause AND PSF-2.0 AND Apache-2.0
259
- For details, see doc/scipy-sphinx-theme/LICENSE.txt
260
-
261
- Name: lapack-lite
262
- Files: numpy/linalg/lapack_lite/*
263
- License: BSD-3-Clause
264
- For details, see numpy/linalg/lapack_lite/LICENSE.txt
265
-
266
- Name: tempita
267
- Files: tools/npy_tempita/*
268
- License: MIT
269
- For details, see tools/npy_tempita/license.txt
270
-
271
- Name: dragon4
272
- Files: numpy/core/src/multiarray/dragon4.c
273
- License: MIT
274
- For license text, see numpy/core/src/multiarray/dragon4.c
275
-
276
-
277
-
278
- Open Source Software licensed under the MIT license:
279
- ---------------------------------------------
280
- 1. facexlib
281
- Copyright (c) 2020 Xintao Wang
282
-
283
- 2. opencv-python
284
- Copyright (c) Olli-Pekka Heinisuo
285
- Please note that only files in cv2 package are used.
286
-
287
-
288
- Terms of the MIT License:
289
- ---------------------------------------------
290
- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
291
-
292
- The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
293
-
294
- THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
295
-
296
-
297
-
298
- Open Source Software licensed under the MIT license and Other Licenses of the Third-Party Components therein:
299
- ---------------------------------------------
300
- 1. tqdm
301
- Copyright (c) 2013 noamraph
302
-
303
- `tqdm` is a product of collaborative work.
304
- Unless otherwise stated, all authors (see commit logs) retain copyright
305
- for their respective work, and release the work under the MIT licence
306
- (text below).
307
-
308
- Exceptions or notable authors are listed below
309
- in reverse chronological order:
310
-
311
- * files: *
312
- MPLv2.0 2015-2020 (c) Casper da Costa-Luis
313
- [casperdcl](https://github.com/casperdcl).
314
- * files: tqdm/_tqdm.py
315
- MIT 2016 (c) [PR #96] on behalf of Google Inc.
316
- * files: tqdm/_tqdm.py setup.py README.rst MANIFEST.in .gitignore
317
- MIT 2013 (c) Noam Yorav-Raphael, original author.
318
-
319
- [PR #96]: https://github.com/tqdm/tqdm/pull/96
320
-
321
-
322
- Mozilla Public Licence (MPL) v. 2.0 - Exhibit A
323
- -----------------------------------------------
324
-
325
- This Source Code Form is subject to the terms of the
326
- Mozilla Public License, v. 2.0.
327
- If a copy of the MPL was not distributed with this file,
328
- You can obtain one at https://mozilla.org/MPL/2.0/.
329
-
330
-
331
- MIT License (MIT)
332
- -----------------
333
-
334
- Copyright (c) 2013 noamraph
335
-
336
- Permission is hereby granted, free of charge, to any person obtaining a copy of
337
- this software and associated documentation files (the "Software"), to deal in
338
- the Software without restriction, including without limitation the rights to
339
- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
340
- the Software, and to permit persons to whom the Software is furnished to do so,
341
- subject to the following conditions:
342
-
343
- The above copyright notice and this permission notice shall be included in all
344
- copies or substantial portions of the Software.
345
-
346
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
347
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
348
- FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
349
- COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
350
- IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
351
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
MANIFEST.in DELETED
@@ -1,8 +0,0 @@
- include assets/*
- include inputs/*
- include scripts/*.py
- include inference_gfpgan.py
- include VERSION
- include LICENSE
- include requirements.txt
- include gfpgan/weights/README.md
PaperModel.md DELETED
@@ -1,76 +0,0 @@
- # Installation
-
- We now provide a *clean* version of GFPGAN, which does not require customized CUDA extensions. See [here](README.md#installation) for this easier installation.<br>
- If you want to use the original model in our paper, please follow the instructions below.
-
- 1. Clone repo
-
- ```bash
- git clone https://github.com/xinntao/GFPGAN.git
- cd GFPGAN
- ```
-
- 1. Install dependent packages
-
- As StyleGAN2 uses customized PyTorch C++ extensions, you need to **compile them during installation** or **load them just in time (JIT)**.
- You can refer to [BasicSR-INSTALL.md](https://github.com/xinntao/BasicSR/blob/master/INSTALL.md) for more details.
-
- **Option 1: Load extensions just in time (JIT)** (for those who only want to run simple inference; this usually causes fewer issues)
-
- ```bash
- # Install basicsr - https://github.com/xinntao/BasicSR
- # We use BasicSR for both training and inference
- pip install basicsr
-
- # Install facexlib - https://github.com/xinntao/facexlib
- # We use the face detection and face restoration helpers in the facexlib package
- pip install facexlib
-
- pip install -r requirements.txt
- python setup.py develop
-
- # remember to set BASICSR_JIT=True before running your commands
- ```
-
- **Option 2: Compile extensions during installation** (for those who need to train or run inference many times)
-
- ```bash
- # Install basicsr - https://github.com/xinntao/BasicSR
- # We use BasicSR for both training and inference
- # Set BASICSR_EXT=True to compile the CUDA extensions in BasicSR - it may take several minutes, please be patient
- # Add -vvv for detailed log prints
- BASICSR_EXT=True pip install basicsr -vvv
-
- # Install facexlib - https://github.com/xinntao/facexlib
- # We use the face detection and face restoration helpers in the facexlib package
- pip install facexlib
-
- pip install -r requirements.txt
- python setup.py develop
- ```
-
- ## :zap: Quick Inference
-
- Download pre-trained models: [GFPGANv1.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth)
-
- ```bash
- wget https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth -P experiments/pretrained_models
- ```
-
- - Option 1: Load extensions just in time (JIT)
-
- ```bash
- BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1
-
- # for aligned images
- BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned
- ```
-
- - Option 2: If you have successfully compiled the extensions during installation
-
- ```bash
- python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1
-
- # for aligned images
- python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned
- ```
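
For reference, the JIT workflow from the (now deleted) PaperModel.md can also be driven from Python rather than the shell. A minimal, hedged sketch, assuming the repository layout and the `inference_gfpgan.py` flags shown above:

```python
# Hedged sketch, not part of the commit: run the original-paper model with JIT-compiled
# BasicSR extensions by exporting BASICSR_JIT before launching inference_gfpgan.py.
# Paths and flags mirror the deleted PaperModel.md instructions.
import os
import subprocess

env = dict(os.environ, BASICSR_JIT="True")  # compile the CUDA extensions just in time

subprocess.run(
    [
        "python", "inference_gfpgan.py",
        "--model_path", "experiments/pretrained_models/GFPGANv1.pth",
        "--test_path", "inputs/whole_imgs",
        "--save_root", "results",
        "--arch", "original",
        "--channel", "1",
    ],
    env=env,
    check=True,
)
```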
README.md CHANGED
@@ -1,34 +1,13 @@
  ---
  title: GFPGAN
- emoji: 📚
+ emoji: 😁
  colorFrom: green
  colorTo: blue
  sdk: gradio
- sdk_version: 3.1.3
+ sdk_version: 3.1.7
  app_file: app.py
  pinned: false
+ license: apache-2.0
  ---
 
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio` or `streamlit`
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
VERSION DELETED
@@ -1 +0,0 @@
- 0.2.4
app.py CHANGED
@@ -1,68 +1,116 @@
  import os
- os.system("pip install gfpgan")
-
- os.system("pip freeze")
- os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .")
- import random
  import gradio as gr
- from PIL import Image
  import torch
- torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg')
- torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png')
- torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg')
- torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg')
- torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg')
-
- import cv2
- import glob
- import numpy as np
- from basicsr.utils import imwrite
- from gfpgan import GFPGANer
-
- import warnings
- warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. '
-               'If you really want to use it, please modify the corresponding codes.')
- bg_upsampler = None
-
- # set up GFPGAN restorer
- restorer = GFPGANer(
-     model_path='GFPGANCleanv1-NoCE-C2.pth',
-     upscale=2,
-     arch='clean',
-     channel_multiplier=2,
-     bg_upsampler=bg_upsampler)
-
- def inference(img):
-     input_img = cv2.imread(img, cv2.IMREAD_COLOR)
-     cropped_faces, restored_faces, restored_img = restorer.enhance(
-         input_img, has_aligned=False, only_center_face=False, paste_back=True)
-
-     return Image.fromarray(restored_faces[0][:,:,::-1])
-
- title = "GFP-GAN"
- description = "Gradio demo for GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once"
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2101.04061' target='_blank'>Towards Real-World Blind Face Restoration with Generative Facial Prior</a> | <a href='https://github.com/TencentARC/GFPGAN' target='_blank'>Github Repo</a></p><center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_GFPGAN' alt='visitor badge'></center>"
  gr.Interface(
-     inference,
-     [gr.inputs.Image(type="filepath", label="Input")],
-     gr.outputs.Image(type="pil", label="Output"),
      title=title,
      description=description,
      article=article,
-     examples=[
-         ['lincoln.jpg'],
-         ['einstein.png'],
-         ['edison.jpg'],
-         ['Henry.jpg'],
-         ['Frida.jpg']
-     ]
- ).launch()

  import os
 
+ import cv2
  import gradio as gr
  import torch
+ from basicsr.archs.srvgg_arch import SRVGGNetCompact
+ from gfpgan.utils import GFPGANer
+ from realesrgan.utils import RealESRGANer
 
+ os.system("pip freeze")
+ os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .")
+ os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P .")
+ os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P .")
 
+ torch.hub.download_url_to_file(
+     'https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg',
+     'lincoln.jpg')
+ torch.hub.download_url_to_file(
+     'https://user-images.githubusercontent.com/17445847/187400315-87a90ac9-d231-45d6-b377-38702bd1838f.jpg',
+     'AI-generate.jpg')
+ torch.hub.download_url_to_file(
+     'https://user-images.githubusercontent.com/17445847/187400981-8a58f7a4-ef61-42d9-af80-bc6234cef860.jpg',
+     'Blake_Lively.jpg')
+ torch.hub.download_url_to_file(
+     'https://user-images.githubusercontent.com/17445847/187401133-8a3bf269-5b4d-4432-b2f0-6d26ee1d3307.png',
+     '10045.png')
 
+ # background enhancer with RealESRGAN
+ model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
+ model_path = 'realesr-general-x4v3.pth'
+ half = True if torch.cuda.is_available() else False
+ upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)
 
+ # Use GFPGAN for face enhancement
+ face_enhancer_v3 = GFPGANer(
+     model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+ face_enhancer_v2 = GFPGANer(
+     model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+ os.makedirs('output', exist_ok=True)
+
+
+ def inference(img, version, scale):
+     print(img, version, scale)
+     try:
+         img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
+         if len(img.shape) == 3 and img.shape[2] == 4:
+             img_mode = 'RGBA'
+         else:
+             img_mode = None
+
+         h, w = img.shape[0:2]
+         if h < 300:
+             img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
+
+         if version == 'v1.2':
+             face_enhancer = face_enhancer_v2
+         else:
+             face_enhancer = face_enhancer_v3
+         try:
+             _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
+         except RuntimeError as error:
+             print('Error', error)
+         else:
+             extension = 'png'
+
+         try:
+             if scale != 2:
+                 interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
+                 h, w = img.shape[0:2]
+                 output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
+         except Exception as error:
+             print('wrong scale input.', error)
+         if img_mode == 'RGBA':  # RGBA images should be saved in png format
+             extension = 'png'
+         else:
+             extension = 'jpg'
+         save_path = f'output/out.{extension}'
+         cv2.imwrite(save_path, output)
+
+         output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
+         return output, save_path
+     except Exception as error:
+         print('global exception', error)
+         return None, None
+
+
+ title = "GFPGAN: Practical Face Restoration Algorithm"
+ description = r"""Gradio demo for <a href='https://github.com/TencentARC/GFPGAN' target='_blank'><b>GFPGAN: Towards Real-World Blind Face Restoration with Generative Facial Prior</b></a>.<br>
+ It can be used to restore your **old photos** or improve **AI-generated faces**.<br>
+ To use it, simply upload your image.<br>
+ If GFPGAN is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/GFPGAN' target='_blank'>Github Repo</a> and recommend it to your friends 😊
+ """
+ article = r"""
+
+ [![download](https://img.shields.io/github/downloads/TencentARC/GFPGAN/total.svg)](https://github.com/TencentARC/GFPGAN/releases)
+ [![GitHub Stars](https://img.shields.io/github/stars/TencentARC/GFPGAN?style=social)](https://github.com/TencentARC/GFPGAN)
+ [![arXiv](https://img.shields.io/badge/arXiv-Paper-<COLOR>.svg)](https://arxiv.org/abs/2101.04061)
+
+ If you have any question, please email 📧 `xintao.wang@outlook.com` or `xintaowang@tencent.com`.
+
+ <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_GFPGAN' alt='visitor badge'></center>
+ """
  gr.Interface(
+     inference, [
+         gr.inputs.Image(type="filepath", label="Input"),
+         gr.inputs.Radio(['v1.2', 'v1.3'], type="value", default='v1.3', label='GFPGAN version'),
+         gr.inputs.Number(label="Rescaling factor", default=2)
+     ], [
+         gr.outputs.Image(type="numpy", label="Output (The whole image)"),
+         gr.outputs.File(label="Download the output image")
+     ],
      title=title,
      description=description,
      article=article,
+     examples=[['AI-generate.jpg', 'v1.3', 2], ['lincoln.jpg', 'v1.3', 2], ['Blake_Lively.jpg', 'v1.3', 2],
+               ['10045.png', 'v1.3', 2]]).launch()
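
Outside the Gradio UI, the restoration pipeline set up in the new app.py can be called directly. A minimal sketch, assuming the same weights the app downloads (`realesr-general-x4v3.pth`, `GFPGANv1.3.pth`) are present in the working directory:

```python
# Minimal sketch (assumes the weight files are already downloaded, as app.py does with
# wget): restore a single image with GFPGAN + RealESRGAN, without the Gradio interface.
import cv2
import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from gfpgan.utils import GFPGANer
from realesrgan.utils import RealESRGANer

# Background upsampler (RealESRGAN), same configuration as in app.py.
bg_model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
upsampler = RealESRGANer(scale=4, model_path='realesr-general-x4v3.pth', model=bg_model,
                         tile=0, tile_pad=10, pre_pad=0, half=torch.cuda.is_available())

# Face restorer (GFPGAN v1.3), same arguments as in app.py.
restorer = GFPGANer(model_path='GFPGANv1.3.pth', upscale=2, arch='clean',
                    channel_multiplier=2, bg_upsampler=upsampler)

img = cv2.imread('lincoln.jpg', cv2.IMREAD_UNCHANGED)
_, _, restored = restorer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
cv2.imwrite('restored.jpg', restored)
```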
experiments/pretrained_models/README.md DELETED
@@ -1,7 +0,0 @@
- # Pre-trained Models and Other Data
-
- Download pre-trained models and other data. Put them in this folder.
-
- 1. [Pretrained StyleGAN2 model: StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth)
- 1. [Component locations of FFHQ: FFHQ_eye_mouth_landmarks_512.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/FFHQ_eye_mouth_landmarks_512.pth)
- 1. [A simple ArcFace model: arcface_resnet18.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/arcface_resnet18.pth)
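
The deleted README above only lists download links. As a hedged sketch (not part of the commit), the same files can be fetched with `torch.hub.download_url_to_file`, the helper app.py already uses for its example images:

```python
# Sketch: fetch the pre-trained data listed in the deleted README into the folder it
# describes, reusing the torch.hub helper that app.py relies on.
import os
import torch

urls = [
    'https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth',
    'https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/FFHQ_eye_mouth_landmarks_512.pth',
    'https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/arcface_resnet18.pth',
]

os.makedirs('experiments/pretrained_models', exist_ok=True)
for url in urls:
    dst = os.path.join('experiments/pretrained_models', url.split('/')[-1])
    if not os.path.exists(dst):
        torch.hub.download_url_to_file(url, dst)
```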
gfpgan/__init__.py DELETED
@@ -1,7 +0,0 @@
- # flake8: noqa
- from .archs import *
- from .data import *
- from .models import *
- from .utils import *
-
- # from .version import *
gfpgan/archs/__init__.py DELETED
@@ -1,10 +0,0 @@
- import importlib
- from basicsr.utils import scandir
- from os import path as osp
-
- # automatically scan and import arch modules for registry
- # scan all the files that end with '_arch.py' under the archs folder
- arch_folder = osp.dirname(osp.abspath(__file__))
- arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
- # import all the arch modules
- _arch_modules = [importlib.import_module(f'gfpgan.archs.{file_name}') for file_name in arch_filenames]
gfpgan/archs/arcface_arch.py DELETED
@@ -1,245 +0,0 @@
1
- import torch.nn as nn
2
- from basicsr.utils.registry import ARCH_REGISTRY
3
-
4
-
5
- def conv3x3(inplanes, outplanes, stride=1):
6
- """A simple wrapper for 3x3 convolution with padding.
7
-
8
- Args:
9
- inplanes (int): Channel number of inputs.
10
- outplanes (int): Channel number of outputs.
11
- stride (int): Stride in convolution. Default: 1.
12
- """
13
- return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False)
14
-
15
-
16
- class BasicBlock(nn.Module):
17
- """Basic residual block used in the ResNetArcFace architecture.
18
-
19
- Args:
20
- inplanes (int): Channel number of inputs.
21
- planes (int): Channel number of outputs.
22
- stride (int): Stride in convolution. Default: 1.
23
- downsample (nn.Module): The downsample module. Default: None.
24
- """
25
- expansion = 1 # output channel expansion ratio
26
-
27
- def __init__(self, inplanes, planes, stride=1, downsample=None):
28
- super(BasicBlock, self).__init__()
29
- self.conv1 = conv3x3(inplanes, planes, stride)
30
- self.bn1 = nn.BatchNorm2d(planes)
31
- self.relu = nn.ReLU(inplace=True)
32
- self.conv2 = conv3x3(planes, planes)
33
- self.bn2 = nn.BatchNorm2d(planes)
34
- self.downsample = downsample
35
- self.stride = stride
36
-
37
- def forward(self, x):
38
- residual = x
39
-
40
- out = self.conv1(x)
41
- out = self.bn1(out)
42
- out = self.relu(out)
43
-
44
- out = self.conv2(out)
45
- out = self.bn2(out)
46
-
47
- if self.downsample is not None:
48
- residual = self.downsample(x)
49
-
50
- out += residual
51
- out = self.relu(out)
52
-
53
- return out
54
-
55
-
56
- class IRBlock(nn.Module):
57
- """Improved residual block (IR Block) used in the ResNetArcFace architecture.
58
-
59
- Args:
60
- inplanes (int): Channel number of inputs.
61
- planes (int): Channel number of outputs.
62
- stride (int): Stride in convolution. Default: 1.
63
- downsample (nn.Module): The downsample module. Default: None.
64
- use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True.
65
- """
66
- expansion = 1 # output channel expansion ratio
67
-
68
- def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):
69
- super(IRBlock, self).__init__()
70
- self.bn0 = nn.BatchNorm2d(inplanes)
71
- self.conv1 = conv3x3(inplanes, inplanes)
72
- self.bn1 = nn.BatchNorm2d(inplanes)
73
- self.prelu = nn.PReLU()
74
- self.conv2 = conv3x3(inplanes, planes, stride)
75
- self.bn2 = nn.BatchNorm2d(planes)
76
- self.downsample = downsample
77
- self.stride = stride
78
- self.use_se = use_se
79
- if self.use_se:
80
- self.se = SEBlock(planes)
81
-
82
- def forward(self, x):
83
- residual = x
84
- out = self.bn0(x)
85
- out = self.conv1(out)
86
- out = self.bn1(out)
87
- out = self.prelu(out)
88
-
89
- out = self.conv2(out)
90
- out = self.bn2(out)
91
- if self.use_se:
92
- out = self.se(out)
93
-
94
- if self.downsample is not None:
95
- residual = self.downsample(x)
96
-
97
- out += residual
98
- out = self.prelu(out)
99
-
100
- return out
101
-
102
-
103
- class Bottleneck(nn.Module):
104
- """Bottleneck block used in the ResNetArcFace architecture.
105
-
106
- Args:
107
- inplanes (int): Channel number of inputs.
108
- planes (int): Channel number of outputs.
109
- stride (int): Stride in convolution. Default: 1.
110
- downsample (nn.Module): The downsample module. Default: None.
111
- """
112
- expansion = 4 # output channel expansion ratio
113
-
114
- def __init__(self, inplanes, planes, stride=1, downsample=None):
115
- super(Bottleneck, self).__init__()
116
- self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
117
- self.bn1 = nn.BatchNorm2d(planes)
118
- self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
119
- self.bn2 = nn.BatchNorm2d(planes)
120
- self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
121
- self.bn3 = nn.BatchNorm2d(planes * self.expansion)
122
- self.relu = nn.ReLU(inplace=True)
123
- self.downsample = downsample
124
- self.stride = stride
125
-
126
- def forward(self, x):
127
- residual = x
128
-
129
- out = self.conv1(x)
130
- out = self.bn1(out)
131
- out = self.relu(out)
132
-
133
- out = self.conv2(out)
134
- out = self.bn2(out)
135
- out = self.relu(out)
136
-
137
- out = self.conv3(out)
138
- out = self.bn3(out)
139
-
140
- if self.downsample is not None:
141
- residual = self.downsample(x)
142
-
143
- out += residual
144
- out = self.relu(out)
145
-
146
- return out
147
-
148
-
149
- class SEBlock(nn.Module):
150
- """The squeeze-and-excitation block (SEBlock) used in the IRBlock.
151
-
152
- Args:
153
- channel (int): Channel number of inputs.
154
- reduction (int): Channel reduction ration. Default: 16.
155
- """
156
-
157
- def __init__(self, channel, reduction=16):
158
- super(SEBlock, self).__init__()
159
- self.avg_pool = nn.AdaptiveAvgPool2d(1) # pool to 1x1 without spatial information
160
- self.fc = nn.Sequential(
161
- nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel),
162
- nn.Sigmoid())
163
-
164
- def forward(self, x):
165
- b, c, _, _ = x.size()
166
- y = self.avg_pool(x).view(b, c)
167
- y = self.fc(y).view(b, c, 1, 1)
168
- return x * y
169
-
170
-
171
- @ARCH_REGISTRY.register()
172
- class ResNetArcFace(nn.Module):
173
- """ArcFace with ResNet architectures.
174
-
175
- Ref: ArcFace: Additive Angular Margin Loss for Deep Face Recognition.
176
-
177
- Args:
178
- block (str): Block used in the ArcFace architecture.
179
- layers (tuple(int)): Block numbers in each layer.
180
- use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True.
181
- """
182
-
183
- def __init__(self, block, layers, use_se=True):
184
- if block == 'IRBlock':
185
- block = IRBlock
186
- self.inplanes = 64
187
- self.use_se = use_se
188
- super(ResNetArcFace, self).__init__()
189
-
190
- self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1, bias=False)
191
- self.bn1 = nn.BatchNorm2d(64)
192
- self.prelu = nn.PReLU()
193
- self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
194
- self.layer1 = self._make_layer(block, 64, layers[0])
195
- self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
196
- self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
197
- self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
198
- self.bn4 = nn.BatchNorm2d(512)
199
- self.dropout = nn.Dropout()
200
- self.fc5 = nn.Linear(512 * 8 * 8, 512)
201
- self.bn5 = nn.BatchNorm1d(512)
202
-
203
- # initialization
204
- for m in self.modules():
205
- if isinstance(m, nn.Conv2d):
206
- nn.init.xavier_normal_(m.weight)
207
- elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
208
- nn.init.constant_(m.weight, 1)
209
- nn.init.constant_(m.bias, 0)
210
- elif isinstance(m, nn.Linear):
211
- nn.init.xavier_normal_(m.weight)
212
- nn.init.constant_(m.bias, 0)
213
-
214
- def _make_layer(self, block, planes, num_blocks, stride=1):
215
- downsample = None
216
- if stride != 1 or self.inplanes != planes * block.expansion:
217
- downsample = nn.Sequential(
218
- nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
219
- nn.BatchNorm2d(planes * block.expansion),
220
- )
221
- layers = []
222
- layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))
223
- self.inplanes = planes
224
- for _ in range(1, num_blocks):
225
- layers.append(block(self.inplanes, planes, use_se=self.use_se))
226
-
227
- return nn.Sequential(*layers)
228
-
229
- def forward(self, x):
230
- x = self.conv1(x)
231
- x = self.bn1(x)
232
- x = self.prelu(x)
233
- x = self.maxpool(x)
234
-
235
- x = self.layer1(x)
236
- x = self.layer2(x)
237
- x = self.layer3(x)
238
- x = self.layer4(x)
239
- x = self.bn4(x)
240
- x = self.dropout(x)
241
- x = x.view(x.size(0), -1)
242
- x = self.fc5(x)
243
- x = self.bn5(x)
244
-
245
- return x
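
The deleted `gfpgan/archs/arcface_arch.py` above defines the `ResNetArcFace` identity network. A rough, hedged sketch of instantiating it: the `layers=(2, 2, 2, 2)` layout is an assumption based on the `arcface_resnet18.pth` checkpoint name listed elsewhere in this commit, and the module is assumed to remain importable from the pip-installed `gfpgan` package.

```python
# Hedged sketch, not part of the commit: instantiate the ResNetArcFace network defined
# in the deleted arcface_arch.py. The (2, 2, 2, 2) layer layout is an assumption.
import torch
from gfpgan.archs.arcface_arch import ResNetArcFace  # assumes the pip-installed gfpgan package

arcface = ResNetArcFace(block='IRBlock', layers=(2, 2, 2, 2), use_se=False)
arcface.eval()

# conv1 takes a single (grayscale) channel and fc5 expects an 8x8 feature map,
# so a 1x1x128x128 input yields a 512-dimensional embedding.
with torch.no_grad():
    embedding = arcface(torch.randn(1, 1, 128, 128))
print(embedding.shape)  # torch.Size([1, 512])
```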
gfpgan/archs/gfpganv1_arch.py DELETED
@@ -1,439 +0,0 @@
1
- import math
2
- import random
3
- import torch
4
- from basicsr.archs.stylegan2_arch import (ConvLayer, EqualConv2d, EqualLinear, ResBlock, ScaledLeakyReLU,
5
- StyleGAN2Generator)
6
- from basicsr.ops.fused_act import FusedLeakyReLU
7
- from basicsr.utils.registry import ARCH_REGISTRY
8
- from torch import nn
9
- from torch.nn import functional as F
10
-
11
-
12
- class StyleGAN2GeneratorSFT(StyleGAN2Generator):
13
- """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform).
14
-
15
- Args:
16
- out_size (int): The spatial size of outputs.
17
- num_style_feat (int): Channel number of style features. Default: 512.
18
- num_mlp (int): Layer number of MLP style layers. Default: 8.
19
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
20
- resample_kernel (list[int]): A list indicating the 1D resample kernel magnitude. A cross production will be
21
- applied to extent 1D resample kernel to 2D resample kernel. Default: (1, 3, 3, 1).
22
- lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
23
- narrow (float): The narrow ratio for channels. Default: 1.
24
- sft_half (bool): Whether to apply SFT on half of the input channels. Default: False.
25
- """
26
-
27
- def __init__(self,
28
- out_size,
29
- num_style_feat=512,
30
- num_mlp=8,
31
- channel_multiplier=2,
32
- resample_kernel=(1, 3, 3, 1),
33
- lr_mlp=0.01,
34
- narrow=1,
35
- sft_half=False):
36
- super(StyleGAN2GeneratorSFT, self).__init__(
37
- out_size,
38
- num_style_feat=num_style_feat,
39
- num_mlp=num_mlp,
40
- channel_multiplier=channel_multiplier,
41
- resample_kernel=resample_kernel,
42
- lr_mlp=lr_mlp,
43
- narrow=narrow)
44
- self.sft_half = sft_half
45
-
46
- def forward(self,
47
- styles,
48
- conditions,
49
- input_is_latent=False,
50
- noise=None,
51
- randomize_noise=True,
52
- truncation=1,
53
- truncation_latent=None,
54
- inject_index=None,
55
- return_latents=False):
56
- """Forward function for StyleGAN2GeneratorSFT.
57
-
58
- Args:
59
- styles (list[Tensor]): Sample codes of styles.
60
- conditions (list[Tensor]): SFT conditions to generators.
61
- input_is_latent (bool): Whether input is latent style. Default: False.
62
- noise (Tensor | None): Input noise or None. Default: None.
63
- randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
64
- truncation (float): The truncation ratio. Default: 1.
65
- truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
66
- inject_index (int | None): The injection index for mixing noise. Default: None.
67
- return_latents (bool): Whether to return style latents. Default: False.
68
- """
69
- # style codes -> latents with Style MLP layer
70
- if not input_is_latent:
71
- styles = [self.style_mlp(s) for s in styles]
72
- # noises
73
- if noise is None:
74
- if randomize_noise:
75
- noise = [None] * self.num_layers # for each style conv layer
76
- else: # use the stored noise
77
- noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
78
- # style truncation
79
- if truncation < 1:
80
- style_truncation = []
81
- for style in styles:
82
- style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
83
- styles = style_truncation
84
- # get style latents with injection
85
- if len(styles) == 1:
86
- inject_index = self.num_latent
87
-
88
- if styles[0].ndim < 3:
89
- # repeat latent code for all the layers
90
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
91
- else: # used for encoder with different latent code for each layer
92
- latent = styles[0]
93
- elif len(styles) == 2: # mixing noises
94
- if inject_index is None:
95
- inject_index = random.randint(1, self.num_latent - 1)
96
- latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
97
- latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
98
- latent = torch.cat([latent1, latent2], 1)
99
-
100
- # main generation
101
- out = self.constant_input(latent.shape[0])
102
- out = self.style_conv1(out, latent[:, 0], noise=noise[0])
103
- skip = self.to_rgb1(out, latent[:, 1])
104
-
105
- i = 1
106
- for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
107
- noise[2::2], self.to_rgbs):
108
- out = conv1(out, latent[:, i], noise=noise1)
109
-
110
- # the conditions may have fewer levels
111
- if i < len(conditions):
112
- # SFT part to combine the conditions
113
- if self.sft_half: # only apply SFT to half of the channels
114
- out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
115
- out_sft = out_sft * conditions[i - 1] + conditions[i]
116
- out = torch.cat([out_same, out_sft], dim=1)
117
- else: # apply SFT to all the channels
118
- out = out * conditions[i - 1] + conditions[i]
119
-
120
- out = conv2(out, latent[:, i + 1], noise=noise2)
121
- skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
122
- i += 2
123
-
124
- image = skip
125
-
126
- if return_latents:
127
- return image, latent
128
- else:
129
- return image, None
130
-
131
-
132
- class ConvUpLayer(nn.Module):
133
- """Convolutional upsampling layer. It uses bilinear upsampler + Conv.
134
-
135
- Args:
136
- in_channels (int): Channel number of the input.
137
- out_channels (int): Channel number of the output.
138
- kernel_size (int): Size of the convolving kernel.
139
- stride (int): Stride of the convolution. Default: 1
140
- padding (int): Zero-padding added to both sides of the input. Default: 0.
141
- bias (bool): If ``True``, adds a learnable bias to the output. Default: ``True``.
142
- bias_init_val (float): Bias initialized value. Default: 0.
143
- activate (bool): Whether to use activation. Default: True.
144
- """
145
-
146
- def __init__(self,
147
- in_channels,
148
- out_channels,
149
- kernel_size,
150
- stride=1,
151
- padding=0,
152
- bias=True,
153
- bias_init_val=0,
154
- activate=True):
155
- super(ConvUpLayer, self).__init__()
156
- self.in_channels = in_channels
157
- self.out_channels = out_channels
158
- self.kernel_size = kernel_size
159
- self.stride = stride
160
- self.padding = padding
161
- # self.scale is used to scale the convolution weights, which is related to the common initializations.
162
- self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
163
-
164
- self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))
165
-
166
- if bias and not activate:
167
- self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
168
- else:
169
- self.register_parameter('bias', None)
170
-
171
- # activation
172
- if activate:
173
- if bias:
174
- self.activation = FusedLeakyReLU(out_channels)
175
- else:
176
- self.activation = ScaledLeakyReLU(0.2)
177
- else:
178
- self.activation = None
179
-
180
- def forward(self, x):
181
- # bilinear upsample
182
- out = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
183
- # conv
184
- out = F.conv2d(
185
- out,
186
- self.weight * self.scale,
187
- bias=self.bias,
188
- stride=self.stride,
189
- padding=self.padding,
190
- )
191
- # activation
192
- if self.activation is not None:
193
- out = self.activation(out)
194
- return out
195
-
196
-
197
- class ResUpBlock(nn.Module):
198
- """Residual block with upsampling.
199
-
200
- Args:
201
- in_channels (int): Channel number of the input.
202
- out_channels (int): Channel number of the output.
203
- """
204
-
205
- def __init__(self, in_channels, out_channels):
206
- super(ResUpBlock, self).__init__()
207
-
208
- self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True)
209
- self.conv2 = ConvUpLayer(in_channels, out_channels, 3, stride=1, padding=1, bias=True, activate=True)
210
- self.skip = ConvUpLayer(in_channels, out_channels, 1, bias=False, activate=False)
211
-
212
- def forward(self, x):
213
- out = self.conv1(x)
214
- out = self.conv2(out)
215
- skip = self.skip(x)
216
- out = (out + skip) / math.sqrt(2)
217
- return out
218
-
219
-
220
- @ARCH_REGISTRY.register()
221
- class GFPGANv1(nn.Module):
222
- """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT.
223
-
224
- Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior.
225
-
226
- Args:
227
- out_size (int): The spatial size of outputs.
228
- num_style_feat (int): Channel number of style features. Default: 512.
229
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
230
- resample_kernel (list[int]): A list indicating the 1D resample kernel magnitude. An outer product will be
231
- applied to extend the 1D resample kernel to a 2D resample kernel. Default: (1, 3, 3, 1).
232
- decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None.
233
- fix_decoder (bool): Whether to fix the decoder. Default: True.
234
-
235
- num_mlp (int): Layer number of MLP style layers. Default: 8.
236
- lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
237
- input_is_latent (bool): Whether input is latent style. Default: False.
238
- different_w (bool): Whether to use different latent w for different layers. Default: False.
239
- narrow (float): The narrow ratio for channels. Default: 1.
240
- sft_half (bool): Whether to apply SFT on half of the input channels. Default: False.
241
- """
242
-
243
- def __init__(
244
- self,
245
- out_size,
246
- num_style_feat=512,
247
- channel_multiplier=1,
248
- resample_kernel=(1, 3, 3, 1),
249
- decoder_load_path=None,
250
- fix_decoder=True,
251
- # for stylegan decoder
252
- num_mlp=8,
253
- lr_mlp=0.01,
254
- input_is_latent=False,
255
- different_w=False,
256
- narrow=1,
257
- sft_half=False):
258
-
259
- super(GFPGANv1, self).__init__()
260
- self.input_is_latent = input_is_latent
261
- self.different_w = different_w
262
- self.num_style_feat = num_style_feat
263
-
264
- unet_narrow = narrow * 0.5 # by default, use a half of input channels
265
- channels = {
266
- '4': int(512 * unet_narrow),
267
- '8': int(512 * unet_narrow),
268
- '16': int(512 * unet_narrow),
269
- '32': int(512 * unet_narrow),
270
- '64': int(256 * channel_multiplier * unet_narrow),
271
- '128': int(128 * channel_multiplier * unet_narrow),
272
- '256': int(64 * channel_multiplier * unet_narrow),
273
- '512': int(32 * channel_multiplier * unet_narrow),
274
- '1024': int(16 * channel_multiplier * unet_narrow)
275
- }
276
-
277
- self.log_size = int(math.log(out_size, 2))
278
- first_out_size = 2**(int(math.log(out_size, 2)))
279
-
280
- self.conv_body_first = ConvLayer(3, channels[f'{first_out_size}'], 1, bias=True, activate=True)
281
-
282
- # downsample
283
- in_channels = channels[f'{first_out_size}']
284
- self.conv_body_down = nn.ModuleList()
285
- for i in range(self.log_size, 2, -1):
286
- out_channels = channels[f'{2**(i - 1)}']
287
- self.conv_body_down.append(ResBlock(in_channels, out_channels, resample_kernel))
288
- in_channels = out_channels
289
-
290
- self.final_conv = ConvLayer(in_channels, channels['4'], 3, bias=True, activate=True)
291
-
292
- # upsample
293
- in_channels = channels['4']
294
- self.conv_body_up = nn.ModuleList()
295
- for i in range(3, self.log_size + 1):
296
- out_channels = channels[f'{2**i}']
297
- self.conv_body_up.append(ResUpBlock(in_channels, out_channels))
298
- in_channels = out_channels
299
-
300
- # to RGB
301
- self.toRGB = nn.ModuleList()
302
- for i in range(3, self.log_size + 1):
303
- self.toRGB.append(EqualConv2d(channels[f'{2**i}'], 3, 1, stride=1, padding=0, bias=True, bias_init_val=0))
304
-
305
- if different_w:
306
- linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat
307
- else:
308
- linear_out_channel = num_style_feat
309
-
310
- self.final_linear = EqualLinear(
311
- channels['4'] * 4 * 4, linear_out_channel, bias=True, bias_init_val=0, lr_mul=1, activation=None)
312
-
313
- # the decoder: stylegan2 generator with SFT modulations
314
- self.stylegan_decoder = StyleGAN2GeneratorSFT(
315
- out_size=out_size,
316
- num_style_feat=num_style_feat,
317
- num_mlp=num_mlp,
318
- channel_multiplier=channel_multiplier,
319
- resample_kernel=resample_kernel,
320
- lr_mlp=lr_mlp,
321
- narrow=narrow,
322
- sft_half=sft_half)
323
-
324
- # load pre-trained stylegan2 model if necessary
325
- if decoder_load_path:
326
- self.stylegan_decoder.load_state_dict(
327
- torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema'])
328
- # fix decoder without updating params
329
- if fix_decoder:
330
- for _, param in self.stylegan_decoder.named_parameters():
331
- param.requires_grad = False
332
-
333
- # for SFT modulations (scale and shift)
334
- self.condition_scale = nn.ModuleList()
335
- self.condition_shift = nn.ModuleList()
336
- for i in range(3, self.log_size + 1):
337
- out_channels = channels[f'{2**i}']
338
- if sft_half:
339
- sft_out_channels = out_channels
340
- else:
341
- sft_out_channels = out_channels * 2
342
- self.condition_scale.append(
343
- nn.Sequential(
344
- EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0),
345
- ScaledLeakyReLU(0.2),
346
- EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=1)))
347
- self.condition_shift.append(
348
- nn.Sequential(
349
- EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0),
350
- ScaledLeakyReLU(0.2),
351
- EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0)))
352
-
353
- def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
354
- """Forward function for GFPGANv1.
355
-
356
- Args:
357
- x (Tensor): Input images.
358
- return_latents (bool): Whether to return style latents. Default: False.
359
- return_rgb (bool): Whether to return intermediate RGB images. Default: True.
360
- randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
361
- """
362
- conditions = []
363
- unet_skips = []
364
- out_rgbs = []
365
-
366
- # encoder
367
- feat = self.conv_body_first(x)
368
- for i in range(self.log_size - 2):
369
- feat = self.conv_body_down[i](feat)
370
- unet_skips.insert(0, feat)
371
-
372
- feat = self.final_conv(feat)
373
-
374
- # style code
375
- style_code = self.final_linear(feat.view(feat.size(0), -1))
376
- if self.different_w:
377
- style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)
378
-
379
- # decode
380
- for i in range(self.log_size - 2):
381
- # add unet skip
382
- feat = feat + unet_skips[i]
383
- # ResUpLayer
384
- feat = self.conv_body_up[i](feat)
385
- # generate scale and shift for SFT layers
386
- scale = self.condition_scale[i](feat)
387
- conditions.append(scale.clone())
388
- shift = self.condition_shift[i](feat)
389
- conditions.append(shift.clone())
390
- # generate rgb images
391
- if return_rgb:
392
- out_rgbs.append(self.toRGB[i](feat))
393
-
394
- # decoder
395
- image, _ = self.stylegan_decoder([style_code],
396
- conditions,
397
- return_latents=return_latents,
398
- input_is_latent=self.input_is_latent,
399
- randomize_noise=randomize_noise)
400
-
401
- return image, out_rgbs
402
-
403
-
404
- @ARCH_REGISTRY.register()
405
- class FacialComponentDiscriminator(nn.Module):
406
- """Facial component (eyes, mouth, noise) discriminator used in GFPGAN.
407
- """
408
-
409
- def __init__(self):
410
- super(FacialComponentDiscriminator, self).__init__()
411
- # It now uses a VGG-style architecture with a fixed model size
412
- self.conv1 = ConvLayer(3, 64, 3, downsample=False, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
413
- self.conv2 = ConvLayer(64, 128, 3, downsample=True, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
414
- self.conv3 = ConvLayer(128, 128, 3, downsample=False, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
415
- self.conv4 = ConvLayer(128, 256, 3, downsample=True, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
416
- self.conv5 = ConvLayer(256, 256, 3, downsample=False, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
417
- self.final_conv = ConvLayer(256, 1, 3, bias=True, activate=False)
418
-
419
- def forward(self, x, return_feats=False):
420
- """Forward function for FacialComponentDiscriminator.
421
-
422
- Args:
423
- x (Tensor): Input images.
424
- return_feats (bool): Whether to return intermediate features. Default: False.
425
- """
426
- feat = self.conv1(x)
427
- feat = self.conv3(self.conv2(feat))
428
- rlt_feats = []
429
- if return_feats:
430
- rlt_feats.append(feat.clone())
431
- feat = self.conv5(self.conv4(feat))
432
- if return_feats:
433
- rlt_feats.append(feat.clone())
434
- out = self.final_conv(feat)
435
-
436
- if return_feats:
437
- return out, rlt_feats
438
- else:
439
- return out, None
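A minimal usage sketch of the GFPGANv1 architecture removed above, assuming the same class remains importable from the pip-installed gfpgan package (the deleted file's path is reused for the import). The keyword values are illustrative assumptions, not a quoted release config, and the compiled StyleGAN2 ops from basicsr must be available for this variant.

import torch
from gfpgan.archs.gfpganv1_arch import GFPGANv1  # assumed import path, mirrors the deleted file

net = GFPGANv1(
    out_size=512,            # output (and expected input) resolution
    num_style_feat=512,
    channel_multiplier=1,
    decoder_load_path=None,  # optionally a pre-trained StyleGAN2 checkpoint (read from its 'params_ema' key)
    fix_decoder=True,
    different_w=True,        # illustrative: one latent w per layer
    sft_half=True)           # illustrative: apply SFT to half of the channels
net.eval()

x = torch.rand(1, 3, 512, 512) * 2 - 1   # face image, roughly in [-1, 1]
with torch.no_grad():
    restored, out_rgbs = net(x, return_rgb=True)  # forward returns (image, intermediate RGBs)
print(restored.shape)  # torch.Size([1, 3, 512, 512])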
gfpgan/archs/gfpganv1_clean_arch.py DELETED
@@ -1,324 +0,0 @@
1
- import math
2
- import random
3
- import torch
4
- from basicsr.utils.registry import ARCH_REGISTRY
5
- from torch import nn
6
- from torch.nn import functional as F
7
-
8
- from .stylegan2_clean_arch import StyleGAN2GeneratorClean
9
-
10
-
11
- class StyleGAN2GeneratorCSFT(StyleGAN2GeneratorClean):
12
- """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform).
13
-
14
- It is the clean version without custom compiled CUDA extensions used in StyleGAN2.
15
-
16
- Args:
17
- out_size (int): The spatial size of outputs.
18
- num_style_feat (int): Channel number of style features. Default: 512.
19
- num_mlp (int): Layer number of MLP style layers. Default: 8.
20
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
21
- narrow (float): The narrow ratio for channels. Default: 1.
22
- sft_half (bool): Whether to apply SFT on half of the input channels. Default: False.
23
- """
24
-
25
- def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1, sft_half=False):
26
- super(StyleGAN2GeneratorCSFT, self).__init__(
27
- out_size,
28
- num_style_feat=num_style_feat,
29
- num_mlp=num_mlp,
30
- channel_multiplier=channel_multiplier,
31
- narrow=narrow)
32
- self.sft_half = sft_half
33
-
34
- def forward(self,
35
- styles,
36
- conditions,
37
- input_is_latent=False,
38
- noise=None,
39
- randomize_noise=True,
40
- truncation=1,
41
- truncation_latent=None,
42
- inject_index=None,
43
- return_latents=False):
44
- """Forward function for StyleGAN2GeneratorCSFT.
45
-
46
- Args:
47
- styles (list[Tensor]): Sample codes of styles.
48
- conditions (list[Tensor]): SFT conditions to generators.
49
- input_is_latent (bool): Whether input is latent style. Default: False.
50
- noise (Tensor | None): Input noise or None. Default: None.
51
- randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
52
- truncation (float): The truncation ratio. Default: 1.
53
- truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
54
- inject_index (int | None): The injection index for mixing noise. Default: None.
55
- return_latents (bool): Whether to return style latents. Default: False.
56
- """
57
- # style codes -> latents with Style MLP layer
58
- if not input_is_latent:
59
- styles = [self.style_mlp(s) for s in styles]
60
- # noises
61
- if noise is None:
62
- if randomize_noise:
63
- noise = [None] * self.num_layers # for each style conv layer
64
- else: # use the stored noise
65
- noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
66
- # style truncation
67
- if truncation < 1:
68
- style_truncation = []
69
- for style in styles:
70
- style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
71
- styles = style_truncation
72
- # get style latents with injection
73
- if len(styles) == 1:
74
- inject_index = self.num_latent
75
-
76
- if styles[0].ndim < 3:
77
- # repeat latent code for all the layers
78
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
79
- else: # used for encoder with different latent code for each layer
80
- latent = styles[0]
81
- elif len(styles) == 2: # mixing noises
82
- if inject_index is None:
83
- inject_index = random.randint(1, self.num_latent - 1)
84
- latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
85
- latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
86
- latent = torch.cat([latent1, latent2], 1)
87
-
88
- # main generation
89
- out = self.constant_input(latent.shape[0])
90
- out = self.style_conv1(out, latent[:, 0], noise=noise[0])
91
- skip = self.to_rgb1(out, latent[:, 1])
92
-
93
- i = 1
94
- for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
95
- noise[2::2], self.to_rgbs):
96
- out = conv1(out, latent[:, i], noise=noise1)
97
-
98
- # the conditions may have fewer levels
99
- if i < len(conditions):
100
- # SFT part to combine the conditions
101
- if self.sft_half: # only apply SFT to half of the channels
102
- out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
103
- out_sft = out_sft * conditions[i - 1] + conditions[i]
104
- out = torch.cat([out_same, out_sft], dim=1)
105
- else: # apply SFT to all the channels
106
- out = out * conditions[i - 1] + conditions[i]
107
-
108
- out = conv2(out, latent[:, i + 1], noise=noise2)
109
- skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
110
- i += 2
111
-
112
- image = skip
113
-
114
- if return_latents:
115
- return image, latent
116
- else:
117
- return image, None
118
-
119
-
120
- class ResBlock(nn.Module):
121
- """Residual block with bilinear upsampling/downsampling.
122
-
123
- Args:
124
- in_channels (int): Channel number of the input.
125
- out_channels (int): Channel number of the output.
126
- mode (str): Upsampling/downsampling mode. Options: down | up. Default: down.
127
- """
128
-
129
- def __init__(self, in_channels, out_channels, mode='down'):
130
- super(ResBlock, self).__init__()
131
-
132
- self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
133
- self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
134
- self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False)
135
- if mode == 'down':
136
- self.scale_factor = 0.5
137
- elif mode == 'up':
138
- self.scale_factor = 2
139
-
140
- def forward(self, x):
141
- out = F.leaky_relu_(self.conv1(x), negative_slope=0.2)
142
- # upsample/downsample
143
- out = F.interpolate(out, scale_factor=self.scale_factor, mode='bilinear', align_corners=False)
144
- out = F.leaky_relu_(self.conv2(out), negative_slope=0.2)
145
- # skip
146
- x = F.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=False)
147
- skip = self.skip(x)
148
- out = out + skip
149
- return out
150
-
151
-
152
- @ARCH_REGISTRY.register()
153
- class GFPGANv1Clean(nn.Module):
154
- """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT.
155
-
156
- It is the clean version without custom compiled CUDA extensions used in StyleGAN2.
157
-
158
- Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior.
159
-
160
- Args:
161
- out_size (int): The spatial size of outputs.
162
- num_style_feat (int): Channel number of style features. Default: 512.
163
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
164
- decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None.
165
- fix_decoder (bool): Whether to fix the decoder. Default: True.
166
-
167
- num_mlp (int): Layer number of MLP style layers. Default: 8.
168
- input_is_latent (bool): Whether input is latent style. Default: False.
169
- different_w (bool): Whether to use different latent w for different layers. Default: False.
170
- narrow (float): The narrow ratio for channels. Default: 1.
171
- sft_half (bool): Whether to apply SFT on half of the input channels. Default: False.
172
- """
173
-
174
- def __init__(
175
- self,
176
- out_size,
177
- num_style_feat=512,
178
- channel_multiplier=1,
179
- decoder_load_path=None,
180
- fix_decoder=True,
181
- # for stylegan decoder
182
- num_mlp=8,
183
- input_is_latent=False,
184
- different_w=False,
185
- narrow=1,
186
- sft_half=False):
187
-
188
- super(GFPGANv1Clean, self).__init__()
189
- self.input_is_latent = input_is_latent
190
- self.different_w = different_w
191
- self.num_style_feat = num_style_feat
192
-
193
- unet_narrow = narrow * 0.5 # by default, use a half of input channels
194
- channels = {
195
- '4': int(512 * unet_narrow),
196
- '8': int(512 * unet_narrow),
197
- '16': int(512 * unet_narrow),
198
- '32': int(512 * unet_narrow),
199
- '64': int(256 * channel_multiplier * unet_narrow),
200
- '128': int(128 * channel_multiplier * unet_narrow),
201
- '256': int(64 * channel_multiplier * unet_narrow),
202
- '512': int(32 * channel_multiplier * unet_narrow),
203
- '1024': int(16 * channel_multiplier * unet_narrow)
204
- }
205
-
206
- self.log_size = int(math.log(out_size, 2))
207
- first_out_size = 2**(int(math.log(out_size, 2)))
208
-
209
- self.conv_body_first = nn.Conv2d(3, channels[f'{first_out_size}'], 1)
210
-
211
- # downsample
212
- in_channels = channels[f'{first_out_size}']
213
- self.conv_body_down = nn.ModuleList()
214
- for i in range(self.log_size, 2, -1):
215
- out_channels = channels[f'{2**(i - 1)}']
216
- self.conv_body_down.append(ResBlock(in_channels, out_channels, mode='down'))
217
- in_channels = out_channels
218
-
219
- self.final_conv = nn.Conv2d(in_channels, channels['4'], 3, 1, 1)
220
-
221
- # upsample
222
- in_channels = channels['4']
223
- self.conv_body_up = nn.ModuleList()
224
- for i in range(3, self.log_size + 1):
225
- out_channels = channels[f'{2**i}']
226
- self.conv_body_up.append(ResBlock(in_channels, out_channels, mode='up'))
227
- in_channels = out_channels
228
-
229
- # to RGB
230
- self.toRGB = nn.ModuleList()
231
- for i in range(3, self.log_size + 1):
232
- self.toRGB.append(nn.Conv2d(channels[f'{2**i}'], 3, 1))
233
-
234
- if different_w:
235
- linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat
236
- else:
237
- linear_out_channel = num_style_feat
238
-
239
- self.final_linear = nn.Linear(channels['4'] * 4 * 4, linear_out_channel)
240
-
241
- # the decoder: stylegan2 generator with SFT modulations
242
- self.stylegan_decoder = StyleGAN2GeneratorCSFT(
243
- out_size=out_size,
244
- num_style_feat=num_style_feat,
245
- num_mlp=num_mlp,
246
- channel_multiplier=channel_multiplier,
247
- narrow=narrow,
248
- sft_half=sft_half)
249
-
250
- # load pre-trained stylegan2 model if necessary
251
- if decoder_load_path:
252
- self.stylegan_decoder.load_state_dict(
253
- torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema'])
254
- # fix decoder without updating params
255
- if fix_decoder:
256
- for _, param in self.stylegan_decoder.named_parameters():
257
- param.requires_grad = False
258
-
259
- # for SFT modulations (scale and shift)
260
- self.condition_scale = nn.ModuleList()
261
- self.condition_shift = nn.ModuleList()
262
- for i in range(3, self.log_size + 1):
263
- out_channels = channels[f'{2**i}']
264
- if sft_half:
265
- sft_out_channels = out_channels
266
- else:
267
- sft_out_channels = out_channels * 2
268
- self.condition_scale.append(
269
- nn.Sequential(
270
- nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True),
271
- nn.Conv2d(out_channels, sft_out_channels, 3, 1, 1)))
272
- self.condition_shift.append(
273
- nn.Sequential(
274
- nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True),
275
- nn.Conv2d(out_channels, sft_out_channels, 3, 1, 1)))
276
-
277
- def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
278
- """Forward function for GFPGANv1Clean.
279
-
280
- Args:
281
- x (Tensor): Input images.
282
- return_latents (bool): Whether to return style latents. Default: False.
283
- return_rgb (bool): Whether to return intermediate RGB images. Default: True.
284
- randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
285
- """
286
- conditions = []
287
- unet_skips = []
288
- out_rgbs = []
289
-
290
- # encoder
291
- feat = F.leaky_relu_(self.conv_body_first(x), negative_slope=0.2)
292
- for i in range(self.log_size - 2):
293
- feat = self.conv_body_down[i](feat)
294
- unet_skips.insert(0, feat)
295
- feat = F.leaky_relu_(self.final_conv(feat), negative_slope=0.2)
296
-
297
- # style code
298
- style_code = self.final_linear(feat.view(feat.size(0), -1))
299
- if self.different_w:
300
- style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)
301
-
302
- # decode
303
- for i in range(self.log_size - 2):
304
- # add unet skip
305
- feat = feat + unet_skips[i]
306
- # ResUpLayer
307
- feat = self.conv_body_up[i](feat)
308
- # generate scale and shift for SFT layers
309
- scale = self.condition_scale[i](feat)
310
- conditions.append(scale.clone())
311
- shift = self.condition_shift[i](feat)
312
- conditions.append(shift.clone())
313
- # generate rgb images
314
- if return_rgb:
315
- out_rgbs.append(self.toRGB[i](feat))
316
-
317
- # decoder
318
- image, _ = self.stylegan_decoder([style_code],
319
- conditions,
320
- return_latents=return_latents,
321
- input_is_latent=self.input_is_latent,
322
- randomize_noise=randomize_noise)
323
-
324
- return image, out_rgbs
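The clean variant removed above uses only plain PyTorch layers, which is what makes it practical for CPU-only demos. A minimal sketch, assuming the class is importable from the installed gfpgan package; the keyword values below are illustrative assumptions rather than the released checkpoint's exact configuration.

import torch
from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean  # assumed import path

net = GFPGANv1Clean(
    out_size=512,
    num_style_feat=512,
    channel_multiplier=2,   # illustrative
    fix_decoder=False,
    different_w=True,       # illustrative
    sft_half=True)          # illustrative
net.eval()

lq = torch.rand(1, 3, 512, 512) * 2 - 1       # low-quality face in [-1, 1]
with torch.no_grad():
    restored, _ = net(lq, return_rgb=False)   # forward returns (image, out_rgbs)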
gfpgan/archs/stylegan2_clean_arch.py DELETED
@@ -1,368 +0,0 @@
1
- import math
2
- import random
3
- import torch
4
- from basicsr.archs.arch_util import default_init_weights
5
- from basicsr.utils.registry import ARCH_REGISTRY
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
-
10
- class NormStyleCode(nn.Module):
11
-
12
- def forward(self, x):
13
- """Normalize the style codes.
14
-
15
- Args:
16
- x (Tensor): Style codes with shape (b, c).
17
-
18
- Returns:
19
- Tensor: Normalized tensor.
20
- """
21
- return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
22
-
23
-
24
- class ModulatedConv2d(nn.Module):
25
- """Modulated Conv2d used in StyleGAN2.
26
-
27
- There is no bias in ModulatedConv2d.
28
-
29
- Args:
30
- in_channels (int): Channel number of the input.
31
- out_channels (int): Channel number of the output.
32
- kernel_size (int): Size of the convolving kernel.
33
- num_style_feat (int): Channel number of style features.
34
- demodulate (bool): Whether to demodulate in the conv layer. Default: True.
35
- sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None.
36
- eps (float): A value added to the denominator for numerical stability. Default: 1e-8.
37
- """
38
-
39
- def __init__(self,
40
- in_channels,
41
- out_channels,
42
- kernel_size,
43
- num_style_feat,
44
- demodulate=True,
45
- sample_mode=None,
46
- eps=1e-8):
47
- super(ModulatedConv2d, self).__init__()
48
- self.in_channels = in_channels
49
- self.out_channels = out_channels
50
- self.kernel_size = kernel_size
51
- self.demodulate = demodulate
52
- self.sample_mode = sample_mode
53
- self.eps = eps
54
-
55
- # modulation inside each modulated conv
56
- self.modulation = nn.Linear(num_style_feat, in_channels, bias=True)
57
- # initialization
58
- default_init_weights(self.modulation, scale=1, bias_fill=1, a=0, mode='fan_in', nonlinearity='linear')
59
-
60
- self.weight = nn.Parameter(
61
- torch.randn(1, out_channels, in_channels, kernel_size, kernel_size) /
62
- math.sqrt(in_channels * kernel_size**2))
63
- self.padding = kernel_size // 2
64
-
65
- def forward(self, x, style):
66
- """Forward function.
67
-
68
- Args:
69
- x (Tensor): Tensor with shape (b, c, h, w).
70
- style (Tensor): Tensor with shape (b, num_style_feat).
71
-
72
- Returns:
73
- Tensor: Modulated tensor after convolution.
74
- """
75
- b, c, h, w = x.shape # c = c_in
76
- # weight modulation
77
- style = self.modulation(style).view(b, 1, c, 1, 1)
78
- # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1)
79
- weight = self.weight * style # (b, c_out, c_in, k, k)
80
-
81
- if self.demodulate:
82
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
83
- weight = weight * demod.view(b, self.out_channels, 1, 1, 1)
84
-
85
- weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size)
86
-
87
- # upsample or downsample if necessary
88
- if self.sample_mode == 'upsample':
89
- x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
90
- elif self.sample_mode == 'downsample':
91
- x = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False)
92
-
93
- b, c, h, w = x.shape
94
- x = x.view(1, b * c, h, w)
95
- # weight: (b*c_out, c_in, k, k), groups=b
96
- out = F.conv2d(x, weight, padding=self.padding, groups=b)
97
- out = out.view(b, self.out_channels, *out.shape[2:4])
98
-
99
- return out
100
-
101
- def __repr__(self):
102
- return (f'{self.__class__.__name__}(in_channels={self.in_channels}, out_channels={self.out_channels}, '
103
- f'kernel_size={self.kernel_size}, demodulate={self.demodulate}, sample_mode={self.sample_mode})')
104
-
105
-
106
- class StyleConv(nn.Module):
107
- """Style conv used in StyleGAN2.
108
-
109
- Args:
110
- in_channels (int): Channel number of the input.
111
- out_channels (int): Channel number of the output.
112
- kernel_size (int): Size of the convolving kernel.
113
- num_style_feat (int): Channel number of style features.
114
- demodulate (bool): Whether demodulate in the conv layer. Default: True.
115
- sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None.
116
- """
117
-
118
- def __init__(self, in_channels, out_channels, kernel_size, num_style_feat, demodulate=True, sample_mode=None):
119
- super(StyleConv, self).__init__()
120
- self.modulated_conv = ModulatedConv2d(
121
- in_channels, out_channels, kernel_size, num_style_feat, demodulate=demodulate, sample_mode=sample_mode)
122
- self.weight = nn.Parameter(torch.zeros(1)) # for noise injection
123
- self.bias = nn.Parameter(torch.zeros(1, out_channels, 1, 1))
124
- self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
125
-
126
- def forward(self, x, style, noise=None):
127
- # modulate
128
- out = self.modulated_conv(x, style) * 2**0.5 # for conversion
129
- # noise injection
130
- if noise is None:
131
- b, _, h, w = out.shape
132
- noise = out.new_empty(b, 1, h, w).normal_()
133
- out = out + self.weight * noise
134
- # add bias
135
- out = out + self.bias
136
- # activation
137
- out = self.activate(out)
138
- return out
139
-
140
-
141
- class ToRGB(nn.Module):
142
- """To RGB (image space) from features.
143
-
144
- Args:
145
- in_channels (int): Channel number of input.
146
- num_style_feat (int): Channel number of style features.
147
- upsample (bool): Whether to upsample. Default: True.
148
- """
149
-
150
- def __init__(self, in_channels, num_style_feat, upsample=True):
151
- super(ToRGB, self).__init__()
152
- self.upsample = upsample
153
- self.modulated_conv = ModulatedConv2d(
154
- in_channels, 3, kernel_size=1, num_style_feat=num_style_feat, demodulate=False, sample_mode=None)
155
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
156
-
157
- def forward(self, x, style, skip=None):
158
- """Forward function.
159
-
160
- Args:
161
- x (Tensor): Feature tensor with shape (b, c, h, w).
162
- style (Tensor): Tensor with shape (b, num_style_feat).
163
- skip (Tensor): Base/skip tensor. Default: None.
164
-
165
- Returns:
166
- Tensor: RGB images.
167
- """
168
- out = self.modulated_conv(x, style)
169
- out = out + self.bias
170
- if skip is not None:
171
- if self.upsample:
172
- skip = F.interpolate(skip, scale_factor=2, mode='bilinear', align_corners=False)
173
- out = out + skip
174
- return out
175
-
176
-
177
- class ConstantInput(nn.Module):
178
- """Constant input.
179
-
180
- Args:
181
- num_channel (int): Channel number of constant input.
182
- size (int): Spatial size of constant input.
183
- """
184
-
185
- def __init__(self, num_channel, size):
186
- super(ConstantInput, self).__init__()
187
- self.weight = nn.Parameter(torch.randn(1, num_channel, size, size))
188
-
189
- def forward(self, batch):
190
- out = self.weight.repeat(batch, 1, 1, 1)
191
- return out
192
-
193
-
194
- @ARCH_REGISTRY.register()
195
- class StyleGAN2GeneratorClean(nn.Module):
196
- """Clean version of StyleGAN2 Generator.
197
-
198
- Args:
199
- out_size (int): The spatial size of outputs.
200
- num_style_feat (int): Channel number of style features. Default: 512.
201
- num_mlp (int): Layer number of MLP style layers. Default: 8.
202
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
203
- narrow (float): Narrow ratio for channels. Default: 1.0.
204
- """
205
-
206
- def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1):
207
- super(StyleGAN2GeneratorClean, self).__init__()
208
- # Style MLP layers
209
- self.num_style_feat = num_style_feat
210
- style_mlp_layers = [NormStyleCode()]
211
- for i in range(num_mlp):
212
- style_mlp_layers.extend(
213
- [nn.Linear(num_style_feat, num_style_feat, bias=True),
214
- nn.LeakyReLU(negative_slope=0.2, inplace=True)])
215
- self.style_mlp = nn.Sequential(*style_mlp_layers)
216
- # initialization
217
- default_init_weights(self.style_mlp, scale=1, bias_fill=0, a=0.2, mode='fan_in', nonlinearity='leaky_relu')
218
-
219
- # channel list
220
- channels = {
221
- '4': int(512 * narrow),
222
- '8': int(512 * narrow),
223
- '16': int(512 * narrow),
224
- '32': int(512 * narrow),
225
- '64': int(256 * channel_multiplier * narrow),
226
- '128': int(128 * channel_multiplier * narrow),
227
- '256': int(64 * channel_multiplier * narrow),
228
- '512': int(32 * channel_multiplier * narrow),
229
- '1024': int(16 * channel_multiplier * narrow)
230
- }
231
- self.channels = channels
232
-
233
- self.constant_input = ConstantInput(channels['4'], size=4)
234
- self.style_conv1 = StyleConv(
235
- channels['4'],
236
- channels['4'],
237
- kernel_size=3,
238
- num_style_feat=num_style_feat,
239
- demodulate=True,
240
- sample_mode=None)
241
- self.to_rgb1 = ToRGB(channels['4'], num_style_feat, upsample=False)
242
-
243
- self.log_size = int(math.log(out_size, 2))
244
- self.num_layers = (self.log_size - 2) * 2 + 1
245
- self.num_latent = self.log_size * 2 - 2
246
-
247
- self.style_convs = nn.ModuleList()
248
- self.to_rgbs = nn.ModuleList()
249
- self.noises = nn.Module()
250
-
251
- in_channels = channels['4']
252
- # noise
253
- for layer_idx in range(self.num_layers):
254
- resolution = 2**((layer_idx + 5) // 2)
255
- shape = [1, 1, resolution, resolution]
256
- self.noises.register_buffer(f'noise{layer_idx}', torch.randn(*shape))
257
- # style convs and to_rgbs
258
- for i in range(3, self.log_size + 1):
259
- out_channels = channels[f'{2**i}']
260
- self.style_convs.append(
261
- StyleConv(
262
- in_channels,
263
- out_channels,
264
- kernel_size=3,
265
- num_style_feat=num_style_feat,
266
- demodulate=True,
267
- sample_mode='upsample'))
268
- self.style_convs.append(
269
- StyleConv(
270
- out_channels,
271
- out_channels,
272
- kernel_size=3,
273
- num_style_feat=num_style_feat,
274
- demodulate=True,
275
- sample_mode=None))
276
- self.to_rgbs.append(ToRGB(out_channels, num_style_feat, upsample=True))
277
- in_channels = out_channels
278
-
279
- def make_noise(self):
280
- """Make noise for noise injection."""
281
- device = self.constant_input.weight.device
282
- noises = [torch.randn(1, 1, 4, 4, device=device)]
283
-
284
- for i in range(3, self.log_size + 1):
285
- for _ in range(2):
286
- noises.append(torch.randn(1, 1, 2**i, 2**i, device=device))
287
-
288
- return noises
289
-
290
- def get_latent(self, x):
291
- return self.style_mlp(x)
292
-
293
- def mean_latent(self, num_latent):
294
- latent_in = torch.randn(num_latent, self.num_style_feat, device=self.constant_input.weight.device)
295
- latent = self.style_mlp(latent_in).mean(0, keepdim=True)
296
- return latent
297
-
298
- def forward(self,
299
- styles,
300
- input_is_latent=False,
301
- noise=None,
302
- randomize_noise=True,
303
- truncation=1,
304
- truncation_latent=None,
305
- inject_index=None,
306
- return_latents=False):
307
- """Forward function for StyleGAN2GeneratorClean.
308
-
309
- Args:
310
- styles (list[Tensor]): Sample codes of styles.
311
- input_is_latent (bool): Whether input is latent style. Default: False.
312
- noise (Tensor | None): Input noise or None. Default: None.
313
- randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
314
- truncation (float): The truncation ratio. Default: 1.
315
- truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
316
- inject_index (int | None): The injection index for mixing noise. Default: None.
317
- return_latents (bool): Whether to return style latents. Default: False.
318
- """
319
- # style codes -> latents with Style MLP layer
320
- if not input_is_latent:
321
- styles = [self.style_mlp(s) for s in styles]
322
- # noises
323
- if noise is None:
324
- if randomize_noise:
325
- noise = [None] * self.num_layers # for each style conv layer
326
- else: # use the stored noise
327
- noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
328
- # style truncation
329
- if truncation < 1:
330
- style_truncation = []
331
- for style in styles:
332
- style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
333
- styles = style_truncation
334
- # get style latents with injection
335
- if len(styles) == 1:
336
- inject_index = self.num_latent
337
-
338
- if styles[0].ndim < 3:
339
- # repeat latent code for all the layers
340
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
341
- else: # used for encoder with different latent code for each layer
342
- latent = styles[0]
343
- elif len(styles) == 2: # mixing noises
344
- if inject_index is None:
345
- inject_index = random.randint(1, self.num_latent - 1)
346
- latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
347
- latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
348
- latent = torch.cat([latent1, latent2], 1)
349
-
350
- # main generation
351
- out = self.constant_input(latent.shape[0])
352
- out = self.style_conv1(out, latent[:, 0], noise=noise[0])
353
- skip = self.to_rgb1(out, latent[:, 1])
354
-
355
- i = 1
356
- for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
357
- noise[2::2], self.to_rgbs):
358
- out = conv1(out, latent[:, i], noise=noise1)
359
- out = conv2(out, latent[:, i + 1], noise=noise2)
360
- skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
361
- i += 2
362
-
363
- image = skip
364
-
365
- if return_latents:
366
- return image, latent
367
- else:
368
- return image, None
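A short sampling sketch for the clean StyleGAN2 generator removed above (the output size and batch size are illustrative). The forward pass takes a list of style codes and returns the image plus, optionally, the latents.

import torch
from gfpgan.archs.stylegan2_clean_arch import StyleGAN2GeneratorClean  # assumed import path

gen = StyleGAN2GeneratorClean(out_size=256, num_style_feat=512)
gen.eval()

z = torch.randn(4, 512)                       # style codes, shape (b, num_style_feat)
with torch.no_grad():
    mean_w = gen.mean_latent(4096)            # average latent used for truncation
    imgs, _ = gen([z], truncation=0.7, truncation_latent=mean_w)
print(imgs.shape)  # torch.Size([4, 3, 256, 256]), values roughly in [-1, 1]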
gfpgan/data/__init__.py DELETED
@@ -1,10 +0,0 @@
1
- import importlib
2
- from basicsr.utils import scandir
3
- from os import path as osp
4
-
5
- # automatically scan and import dataset modules for registry
6
- # scan all the files that end with '_dataset.py' under the data folder
7
- data_folder = osp.dirname(osp.abspath(__file__))
8
- dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
9
- # import all the dataset modules
10
- _dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames]
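A sketch of what this auto-import enables, assuming basicsr's registry API: importing gfpgan.data loads every *_dataset.py module, so the dataset classes can then be looked up by name in DATASET_REGISTRY (for example by basicsr's dataset builder).

import gfpgan.data  # noqa: F401  -- triggers the scan/import shown above
from basicsr.utils.registry import DATASET_REGISTRY

dataset_cls = DATASET_REGISTRY.get('FFHQDegradationDataset')
print(dataset_cls)  # the registered dataset class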
gfpgan/data/ffhq_degradation_dataset.py DELETED
@@ -1,230 +0,0 @@
1
- import cv2
2
- import math
3
- import numpy as np
4
- import os.path as osp
5
- import torch
6
- import torch.utils.data as data
7
- from basicsr.data import degradations as degradations
8
- from basicsr.data.data_util import paths_from_folder
9
- from basicsr.data.transforms import augment
10
- from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
11
- from basicsr.utils.registry import DATASET_REGISTRY
12
- from torchvision.transforms.functional import (adjust_brightness, adjust_contrast, adjust_hue, adjust_saturation,
13
- normalize)
14
-
15
-
16
- @DATASET_REGISTRY.register()
17
- class FFHQDegradationDataset(data.Dataset):
18
- """FFHQ dataset for GFPGAN.
19
-
20
- It reads high-resolution images and then generates low-quality (LQ) images on the fly.
21
-
22
- Args:
23
- opt (dict): Config for train datasets. It contains the following keys:
24
- dataroot_gt (str): Data root path for gt.
25
- io_backend (dict): IO backend type and other kwarg.
26
- mean (list | tuple): Image mean.
27
- std (list | tuple): Image std.
28
- use_hflip (bool): Whether to horizontally flip.
29
- Please see more options in the code.
30
- """
31
-
32
- def __init__(self, opt):
33
- super(FFHQDegradationDataset, self).__init__()
34
- self.opt = opt
35
- # file client (io backend)
36
- self.file_client = None
37
- self.io_backend_opt = opt['io_backend']
38
-
39
- self.gt_folder = opt['dataroot_gt']
40
- self.mean = opt['mean']
41
- self.std = opt['std']
42
- self.out_size = opt['out_size']
43
-
44
- self.crop_components = opt.get('crop_components', False) # facial components
45
- self.eye_enlarge_ratio = opt.get('eye_enlarge_ratio', 1)  # ratio for enlarging eye regions
46
-
47
- if self.crop_components:
48
- # load component list from a pre-processed pth file
49
- self.components_list = torch.load(opt.get('component_path'))
50
-
51
- # file client (lmdb io backend)
52
- if self.io_backend_opt['type'] == 'lmdb':
53
- self.io_backend_opt['db_paths'] = self.gt_folder
54
- if not self.gt_folder.endswith('.lmdb'):
55
- raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
56
- with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
57
- self.paths = [line.split('.')[0] for line in fin]
58
- else:
59
- # disk backend: scan file list from a folder
60
- self.paths = paths_from_folder(self.gt_folder)
61
-
62
- # degradation configurations
63
- self.blur_kernel_size = opt['blur_kernel_size']
64
- self.kernel_list = opt['kernel_list']
65
- self.kernel_prob = opt['kernel_prob']
66
- self.blur_sigma = opt['blur_sigma']
67
- self.downsample_range = opt['downsample_range']
68
- self.noise_range = opt['noise_range']
69
- self.jpeg_range = opt['jpeg_range']
70
-
71
- # color jitter
72
- self.color_jitter_prob = opt.get('color_jitter_prob')
73
- self.color_jitter_pt_prob = opt.get('color_jitter_pt_prob')
74
- self.color_jitter_shift = opt.get('color_jitter_shift', 20)
75
- # to gray
76
- self.gray_prob = opt.get('gray_prob')
77
-
78
- logger = get_root_logger()
79
- logger.info(f'Blur: blur_kernel_size {self.blur_kernel_size}, sigma: [{", ".join(map(str, self.blur_sigma))}]')
80
- logger.info(f'Downsample: downsample_range [{", ".join(map(str, self.downsample_range))}]')
81
- logger.info(f'Noise: [{", ".join(map(str, self.noise_range))}]')
82
- logger.info(f'JPEG compression: [{", ".join(map(str, self.jpeg_range))}]')
83
-
84
- if self.color_jitter_prob is not None:
85
- logger.info(f'Use random color jitter. Prob: {self.color_jitter_prob}, shift: {self.color_jitter_shift}')
86
- if self.gray_prob is not None:
87
- logger.info(f'Use random gray. Prob: {self.gray_prob}')
88
- self.color_jitter_shift /= 255.
89
-
90
- @staticmethod
91
- def color_jitter(img, shift):
92
- """jitter color: randomly jitter the RGB values, in numpy formats"""
93
- jitter_val = np.random.uniform(-shift, shift, 3).astype(np.float32)
94
- img = img + jitter_val
95
- img = np.clip(img, 0, 1)
96
- return img
97
-
98
- @staticmethod
99
- def color_jitter_pt(img, brightness, contrast, saturation, hue):
100
- """jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats"""
101
- fn_idx = torch.randperm(4)
102
- for fn_id in fn_idx:
103
- if fn_id == 0 and brightness is not None:
104
- brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
105
- img = adjust_brightness(img, brightness_factor)
106
-
107
- if fn_id == 1 and contrast is not None:
108
- contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
109
- img = adjust_contrast(img, contrast_factor)
110
-
111
- if fn_id == 2 and saturation is not None:
112
- saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
113
- img = adjust_saturation(img, saturation_factor)
114
-
115
- if fn_id == 3 and hue is not None:
116
- hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
117
- img = adjust_hue(img, hue_factor)
118
- return img
119
-
120
- def get_component_coordinates(self, index, status):
121
- """Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file"""
122
- components_bbox = self.components_list[f'{index:08d}']
123
- if status[0]: # hflip
124
- # exchange right and left eye
125
- tmp = components_bbox['left_eye']
126
- components_bbox['left_eye'] = components_bbox['right_eye']
127
- components_bbox['right_eye'] = tmp
128
- # modify the width coordinate
129
- components_bbox['left_eye'][0] = self.out_size - components_bbox['left_eye'][0]
130
- components_bbox['right_eye'][0] = self.out_size - components_bbox['right_eye'][0]
131
- components_bbox['mouth'][0] = self.out_size - components_bbox['mouth'][0]
132
-
133
- # get coordinates
134
- locations = []
135
- for part in ['left_eye', 'right_eye', 'mouth']:
136
- mean = components_bbox[part][0:2]
137
- half_len = components_bbox[part][2]
138
- if 'eye' in part:
139
- half_len *= self.eye_enlarge_ratio
140
- loc = np.hstack((mean - half_len + 1, mean + half_len))
141
- loc = torch.from_numpy(loc).float()
142
- locations.append(loc)
143
- return locations
144
-
145
- def __getitem__(self, index):
146
- if self.file_client is None:
147
- self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
148
-
149
- # load gt image
150
- # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
151
- gt_path = self.paths[index]
152
- img_bytes = self.file_client.get(gt_path)
153
- img_gt = imfrombytes(img_bytes, float32=True)
154
-
155
- # random horizontal flip
156
- img_gt, status = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False, return_status=True)
157
- h, w, _ = img_gt.shape
158
-
159
- # get facial component coordinates
160
- if self.crop_components:
161
- locations = self.get_component_coordinates(index, status)
162
- loc_left_eye, loc_right_eye, loc_mouth = locations
163
-
164
- # ------------------------ generate lq image ------------------------ #
165
- # blur
166
- kernel = degradations.random_mixed_kernels(
167
- self.kernel_list,
168
- self.kernel_prob,
169
- self.blur_kernel_size,
170
- self.blur_sigma,
171
- self.blur_sigma, [-math.pi, math.pi],
172
- noise_range=None)
173
- img_lq = cv2.filter2D(img_gt, -1, kernel)
174
- # downsample
175
- scale = np.random.uniform(self.downsample_range[0], self.downsample_range[1])
176
- img_lq = cv2.resize(img_lq, (int(w // scale), int(h // scale)), interpolation=cv2.INTER_LINEAR)
177
- # noise
178
- if self.noise_range is not None:
179
- img_lq = degradations.random_add_gaussian_noise(img_lq, self.noise_range)
180
- # jpeg compression
181
- if self.jpeg_range is not None:
182
- img_lq = degradations.random_add_jpg_compression(img_lq, self.jpeg_range)
183
-
184
- # resize to original size
185
- img_lq = cv2.resize(img_lq, (w, h), interpolation=cv2.INTER_LINEAR)
186
-
187
- # random color jitter (only for lq)
188
- if self.color_jitter_prob is not None and (np.random.uniform() < self.color_jitter_prob):
189
- img_lq = self.color_jitter(img_lq, self.color_jitter_shift)
190
- # random to gray (only for lq)
191
- if self.gray_prob and np.random.uniform() < self.gray_prob:
192
- img_lq = cv2.cvtColor(img_lq, cv2.COLOR_BGR2GRAY)
193
- img_lq = np.tile(img_lq[:, :, None], [1, 1, 3])
194
- if self.opt.get('gt_gray'): # whether convert GT to gray images
195
- img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)
196
- img_gt = np.tile(img_gt[:, :, None], [1, 1, 3]) # repeat the color channels
197
-
198
- # BGR to RGB, HWC to CHW, numpy to tensor
199
- img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
200
-
201
- # random color jitter (pytorch version) (only for lq)
202
- if self.color_jitter_pt_prob is not None and (np.random.uniform() < self.color_jitter_pt_prob):
203
- brightness = self.opt.get('brightness', (0.5, 1.5))
204
- contrast = self.opt.get('contrast', (0.5, 1.5))
205
- saturation = self.opt.get('saturation', (0, 1.5))
206
- hue = self.opt.get('hue', (-0.1, 0.1))
207
- img_lq = self.color_jitter_pt(img_lq, brightness, contrast, saturation, hue)
208
-
209
- # round and clip
210
- img_lq = torch.clamp((img_lq * 255.0).round(), 0, 255) / 255.
211
-
212
- # normalize
213
- normalize(img_gt, self.mean, self.std, inplace=True)
214
- normalize(img_lq, self.mean, self.std, inplace=True)
215
-
216
- if self.crop_components:
217
- return_dict = {
218
- 'lq': img_lq,
219
- 'gt': img_gt,
220
- 'gt_path': gt_path,
221
- 'loc_left_eye': loc_left_eye,
222
- 'loc_right_eye': loc_right_eye,
223
- 'loc_mouth': loc_mouth
224
- }
225
- return return_dict
226
- else:
227
- return {'lq': img_lq, 'gt': img_gt, 'gt_path': gt_path}
228
-
229
- def __len__(self):
230
- return len(self.paths)
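A minimal sketch of instantiating the dataset removed above directly, with only the options its __init__ reads. The degradation ranges are illustrative placeholders, not the paper's training settings, and 'datasets/ffhq_512' is a hypothetical folder of aligned 512x512 faces.

from gfpgan.data.ffhq_degradation_dataset import FFHQDegradationDataset  # assumed import path

opt = {
    'dataroot_gt': 'datasets/ffhq_512',   # hypothetical path
    'io_backend': {'type': 'disk'},
    'mean': [0.5, 0.5, 0.5],
    'std': [0.5, 0.5, 0.5],
    'out_size': 512,
    'use_hflip': True,
    # degradation settings (illustrative values)
    'blur_kernel_size': 41,
    'kernel_list': ['iso', 'aniso'],
    'kernel_prob': [0.5, 0.5],
    'blur_sigma': [0.1, 10],
    'downsample_range': [0.8, 8],
    'noise_range': [0, 20],
    'jpeg_range': [60, 100],
}
dataset = FFHQDegradationDataset(opt)
sample = dataset[0]          # dict with 'lq', 'gt', 'gt_path'
print(sample['lq'].shape)    # torch.Size([3, 512, 512])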
gfpgan/models/__init__.py DELETED
@@ -1,10 +0,0 @@
1
- import importlib
2
- from basicsr.utils import scandir
3
- from os import path as osp
4
-
5
- # automatically scan and import model modules for registry
6
- # scan all the files that end with '_model.py' under the model folder
7
- model_folder = osp.dirname(osp.abspath(__file__))
8
- model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
9
- # import all the model modules
10
- _model_modules = [importlib.import_module(f'gfpgan.models.{file_name}') for file_name in model_filenames]
gfpgan/models/gfpgan_model.py DELETED
@@ -1,580 +0,0 @@
1
- import math
2
- import os.path as osp
3
- import torch
4
- from basicsr.archs import build_network
5
- from basicsr.losses import build_loss
6
- from basicsr.losses.gan_loss import r1_penalty
7
- from basicsr.metrics import calculate_metric
8
- from basicsr.models.base_model import BaseModel
9
- from basicsr.utils import get_root_logger, imwrite, tensor2img
10
- from basicsr.utils.registry import MODEL_REGISTRY
11
- from collections import OrderedDict
12
- from torch.nn import functional as F
13
- from torchvision.ops import roi_align
14
- from tqdm import tqdm
15
-
16
-
17
- @MODEL_REGISTRY.register()
18
- class GFPGANModel(BaseModel):
19
- """The GFPGAN model for Towards real-world blind face restoratin with generative facial prior"""
20
-
21
- def __init__(self, opt):
22
- super(GFPGANModel, self).__init__(opt)
23
- self.idx = 0  # used for saving data for checking
24
-
25
- # define network
26
- self.net_g = build_network(opt['network_g'])
27
- self.net_g = self.model_to_device(self.net_g)
28
- self.print_network(self.net_g)
29
-
30
- # load pretrained model
31
- load_path = self.opt['path'].get('pretrain_network_g', None)
32
- if load_path is not None:
33
- param_key = self.opt['path'].get('param_key_g', 'params')
34
- self.load_network(self.net_g, load_path, self.opt['path'].get('strict_load_g', True), param_key)
35
-
36
- self.log_size = int(math.log(self.opt['network_g']['out_size'], 2))
37
-
38
- if self.is_train:
39
- self.init_training_settings()
40
-
41
- def init_training_settings(self):
42
- train_opt = self.opt['train']
43
-
44
- # ----------- define net_d ----------- #
45
- self.net_d = build_network(self.opt['network_d'])
46
- self.net_d = self.model_to_device(self.net_d)
47
- self.print_network(self.net_d)
48
- # load pretrained model
49
- load_path = self.opt['path'].get('pretrain_network_d', None)
50
- if load_path is not None:
51
- self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True))
52
-
53
- # ----------- define net_g with Exponential Moving Average (EMA) ----------- #
54
- # net_g_ema is only used for testing on one GPU and for saving; there is no need to wrap it with DistributedDataParallel
55
- self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
56
- # load pretrained model
57
- load_path = self.opt['path'].get('pretrain_network_g', None)
58
- if load_path is not None:
59
- self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
60
- else:
61
- self.model_ema(0) # copy net_g weight
62
-
63
- self.net_g.train()
64
- self.net_d.train()
65
- self.net_g_ema.eval()
66
-
67
- # ----------- facial component networks ----------- #
68
- if ('network_d_left_eye' in self.opt and 'network_d_right_eye' in self.opt and 'network_d_mouth' in self.opt):
69
- self.use_facial_disc = True
70
- else:
71
- self.use_facial_disc = False
72
-
73
- if self.use_facial_disc:
74
- # left eye
75
- self.net_d_left_eye = build_network(self.opt['network_d_left_eye'])
76
- self.net_d_left_eye = self.model_to_device(self.net_d_left_eye)
77
- self.print_network(self.net_d_left_eye)
78
- load_path = self.opt['path'].get('pretrain_network_d_left_eye')
79
- if load_path is not None:
80
- self.load_network(self.net_d_left_eye, load_path, True, 'params')
81
- # right eye
82
- self.net_d_right_eye = build_network(self.opt['network_d_right_eye'])
83
- self.net_d_right_eye = self.model_to_device(self.net_d_right_eye)
84
- self.print_network(self.net_d_right_eye)
85
- load_path = self.opt['path'].get('pretrain_network_d_right_eye')
86
- if load_path is not None:
87
- self.load_network(self.net_d_right_eye, load_path, True, 'params')
88
- # mouth
89
- self.net_d_mouth = build_network(self.opt['network_d_mouth'])
90
- self.net_d_mouth = self.model_to_device(self.net_d_mouth)
91
- self.print_network(self.net_d_mouth)
92
- load_path = self.opt['path'].get('pretrain_network_d_mouth')
93
- if load_path is not None:
94
- self.load_network(self.net_d_mouth, load_path, True, 'params')
95
-
96
- self.net_d_left_eye.train()
97
- self.net_d_right_eye.train()
98
- self.net_d_mouth.train()
99
-
100
- # ----------- define facial component gan loss ----------- #
101
- self.cri_component = build_loss(train_opt['gan_component_opt']).to(self.device)
102
-
103
- # ----------- define losses ----------- #
104
- # pixel loss
105
- if train_opt.get('pixel_opt'):
106
- self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
107
- else:
108
- self.cri_pix = None
109
-
110
- # perceptual loss
111
- if train_opt.get('perceptual_opt'):
112
- self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
113
- else:
114
- self.cri_perceptual = None
115
-
116
- # L1 loss is used in pyramid loss, component style loss and identity loss
117
- self.cri_l1 = build_loss(train_opt['L1_opt']).to(self.device)
118
-
119
- # gan loss (wgan)
120
- self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device)
121
-
122
- # ----------- define identity loss ----------- #
123
- if 'network_identity' in self.opt:
124
- self.use_identity = True
125
- else:
126
- self.use_identity = False
127
-
128
- if self.use_identity:
129
- # define identity network
130
- self.network_identity = build_network(self.opt['network_identity'])
131
- self.network_identity = self.model_to_device(self.network_identity)
132
- self.print_network(self.network_identity)
133
- load_path = self.opt['path'].get('pretrain_network_identity')
134
- if load_path is not None:
135
- self.load_network(self.network_identity, load_path, True, None)
136
- self.network_identity.eval()
137
- for param in self.network_identity.parameters():
138
- param.requires_grad = False
139
-
140
- # regularization weights
141
- self.r1_reg_weight = train_opt['r1_reg_weight'] # for discriminator
142
- self.net_d_iters = train_opt.get('net_d_iters', 1)
143
- self.net_d_init_iters = train_opt.get('net_d_init_iters', 0)
144
- self.net_d_reg_every = train_opt['net_d_reg_every']
145
-
146
- # set up optimizers and schedulers
147
- self.setup_optimizers()
148
- self.setup_schedulers()
149
-
150
- def setup_optimizers(self):
151
- train_opt = self.opt['train']
152
-
153
- # ----------- optimizer g ----------- #
154
- net_g_reg_ratio = 1
155
- normal_params = []
156
- for _, param in self.net_g.named_parameters():
157
- normal_params.append(param)
158
- optim_params_g = [{ # add normal params first
159
- 'params': normal_params,
160
- 'lr': train_opt['optim_g']['lr']
161
- }]
162
- optim_type = train_opt['optim_g'].pop('type')
163
- lr = train_opt['optim_g']['lr'] * net_g_reg_ratio
164
- betas = (0**net_g_reg_ratio, 0.99**net_g_reg_ratio)
165
- self.optimizer_g = self.get_optimizer(optim_type, optim_params_g, lr, betas=betas)
166
- self.optimizers.append(self.optimizer_g)
167
-
168
- # ----------- optimizer d ----------- #
169
- net_d_reg_ratio = self.net_d_reg_every / (self.net_d_reg_every + 1)
170
- normal_params = []
171
- for _, param in self.net_d.named_parameters():
172
- normal_params.append(param)
173
- optim_params_d = [{ # add normal params first
174
- 'params': normal_params,
175
- 'lr': train_opt['optim_d']['lr']
176
- }]
177
- optim_type = train_opt['optim_d'].pop('type')
178
- lr = train_opt['optim_d']['lr'] * net_d_reg_ratio
179
- betas = (0**net_d_reg_ratio, 0.99**net_d_reg_ratio)
180
- self.optimizer_d = self.get_optimizer(optim_type, optim_params_d, lr, betas=betas)
181
- self.optimizers.append(self.optimizer_d)
182
-
183
- # ----------- optimizers for facial component networks ----------- #
184
- if self.use_facial_disc:
185
- # setup optimizers for facial component discriminators
186
- optim_type = train_opt['optim_component'].pop('type')
187
- lr = train_opt['optim_component']['lr']
188
- # left eye
189
- self.optimizer_d_left_eye = self.get_optimizer(
190
- optim_type, self.net_d_left_eye.parameters(), lr, betas=(0.9, 0.99))
191
- self.optimizers.append(self.optimizer_d_left_eye)
192
- # right eye
193
- self.optimizer_d_right_eye = self.get_optimizer(
194
- optim_type, self.net_d_right_eye.parameters(), lr, betas=(0.9, 0.99))
195
- self.optimizers.append(self.optimizer_d_right_eye)
196
- # mouth
197
- self.optimizer_d_mouth = self.get_optimizer(
198
- optim_type, self.net_d_mouth.parameters(), lr, betas=(0.9, 0.99))
199
- self.optimizers.append(self.optimizer_d_mouth)
200
-
201
- def feed_data(self, data):
202
- self.lq = data['lq'].to(self.device)
203
- if 'gt' in data:
204
- self.gt = data['gt'].to(self.device)
205
-
206
- if 'loc_left_eye' in data:
207
- # get facial component locations, shape (batch, 4)
208
- self.loc_left_eyes = data['loc_left_eye']
209
- self.loc_right_eyes = data['loc_right_eye']
210
- self.loc_mouths = data['loc_mouth']
211
-
212
- # uncomment to check data
213
- # import torchvision
214
- # if self.opt['rank'] == 0:
215
- # import os
216
- # os.makedirs('tmp/gt', exist_ok=True)
217
- # os.makedirs('tmp/lq', exist_ok=True)
218
- # print(self.idx)
219
- # torchvision.utils.save_image(
220
- # self.gt, f'tmp/gt/gt_{self.idx}.png', nrow=4, padding=2, normalize=True, range=(-1, 1))
221
- # torchvision.utils.save_image(
222
- # self.lq, f'tmp/lq/lq{self.idx}.png', nrow=4, padding=2, normalize=True, range=(-1, 1))
223
- # self.idx = self.idx + 1
224
-
225
- def construct_img_pyramid(self):
226
- """Construct image pyramid for intermediate restoration loss"""
227
- pyramid_gt = [self.gt]
228
- down_img = self.gt
229
- for _ in range(0, self.log_size - 3):
230
- down_img = F.interpolate(down_img, scale_factor=0.5, mode='bilinear', align_corners=False)
231
- pyramid_gt.insert(0, down_img)
232
- return pyramid_gt
233
-
234
- def get_roi_regions(self, eye_out_size=80, mouth_out_size=120):
235
- face_ratio = int(self.opt['network_g']['out_size'] / 512)
236
- eye_out_size *= face_ratio
237
- mouth_out_size *= face_ratio
238
-
239
- rois_eyes = []
240
- rois_mouths = []
241
- for b in range(self.loc_left_eyes.size(0)): # loop for batch size
242
- # left eye and right eye
243
- img_inds = self.loc_left_eyes.new_full((2, 1), b)
244
- bbox = torch.stack([self.loc_left_eyes[b, :], self.loc_right_eyes[b, :]], dim=0) # shape: (2, 4)
245
- rois = torch.cat([img_inds, bbox], dim=-1) # shape: (2, 5)
246
- rois_eyes.append(rois)
247
- # mouth
248
- img_inds = self.loc_left_eyes.new_full((1, 1), b)
249
- rois = torch.cat([img_inds, self.loc_mouths[b:b + 1, :]], dim=-1) # shape: (1, 5)
250
- rois_mouths.append(rois)
251
-
252
- rois_eyes = torch.cat(rois_eyes, 0).to(self.device)
253
- rois_mouths = torch.cat(rois_mouths, 0).to(self.device)
254
-
255
- # real images
256
- all_eyes = roi_align(self.gt, boxes=rois_eyes, output_size=eye_out_size) * face_ratio
257
- self.left_eyes_gt = all_eyes[0::2, :, :, :]
258
- self.right_eyes_gt = all_eyes[1::2, :, :, :]
259
- self.mouths_gt = roi_align(self.gt, boxes=rois_mouths, output_size=mouth_out_size) * face_ratio
260
- # output
261
- all_eyes = roi_align(self.output, boxes=rois_eyes, output_size=eye_out_size) * face_ratio
262
- self.left_eyes = all_eyes[0::2, :, :, :]
263
- self.right_eyes = all_eyes[1::2, :, :, :]
264
- self.mouths = roi_align(self.output, boxes=rois_mouths, output_size=mouth_out_size) * face_ratio
265
-
266
- def _gram_mat(self, x):
267
- """Calculate Gram matrix.
268
-
269
- Args:
270
- x (torch.Tensor): Tensor with shape of (n, c, h, w).
271
-
272
- Returns:
273
- torch.Tensor: Gram matrix.
274
- """
275
- n, c, h, w = x.size()
276
- features = x.view(n, c, w * h)
277
- features_t = features.transpose(1, 2)
278
- gram = features.bmm(features_t) / (c * h * w)
279
- return gram
280
-
281
- def gray_resize_for_identity(self, out, size=128):
282
- out_gray = (0.2989 * out[:, 0, :, :] + 0.5870 * out[:, 1, :, :] + 0.1140 * out[:, 2, :, :])
283
- out_gray = out_gray.unsqueeze(1)
284
- out_gray = F.interpolate(out_gray, (size, size), mode='bilinear', align_corners=False)
285
- return out_gray
286
-
287
- def optimize_parameters(self, current_iter):
288
- # optimize net_g
289
- for p in self.net_d.parameters():
290
- p.requires_grad = False
291
- self.optimizer_g.zero_grad()
292
-
293
- # do not update facial component net_d
294
- if self.use_facial_disc:
295
- for p in self.net_d_left_eye.parameters():
296
- p.requires_grad = False
297
- for p in self.net_d_right_eye.parameters():
298
- p.requires_grad = False
299
- for p in self.net_d_mouth.parameters():
300
- p.requires_grad = False
301
-
302
- # image pyramid loss weight
303
- if current_iter < self.opt['train'].get('remove_pyramid_loss', float('inf')):
304
- pyramid_loss_weight = self.opt['train'].get('pyramid_loss_weight', 1)
305
- else:
306
- pyramid_loss_weight = 1e-12 # very small loss
307
- if pyramid_loss_weight > 0:
308
- self.output, out_rgbs = self.net_g(self.lq, return_rgb=True)
309
- pyramid_gt = self.construct_img_pyramid()
310
- else:
311
- self.output, out_rgbs = self.net_g(self.lq, return_rgb=False)
312
-
313
- # get roi-align regions
314
- if self.use_facial_disc:
315
- self.get_roi_regions(eye_out_size=80, mouth_out_size=120)
316
-
317
- l_g_total = 0
318
- loss_dict = OrderedDict()
319
- if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
320
- # pixel loss
321
- if self.cri_pix:
322
- l_g_pix = self.cri_pix(self.output, self.gt)
323
- l_g_total += l_g_pix
324
- loss_dict['l_g_pix'] = l_g_pix
325
-
326
- # image pyramid loss
327
- if pyramid_loss_weight > 0:
328
- for i in range(0, self.log_size - 2):
329
- l_pyramid = self.cri_l1(out_rgbs[i], pyramid_gt[i]) * pyramid_loss_weight
330
- l_g_total += l_pyramid
331
- loss_dict[f'l_p_{2**(i+3)}'] = l_pyramid
332
-
333
- # perceptual loss
334
- if self.cri_perceptual:
335
- l_g_percep, l_g_style = self.cri_perceptual(self.output, self.gt)
336
- if l_g_percep is not None:
337
- l_g_total += l_g_percep
338
- loss_dict['l_g_percep'] = l_g_percep
339
- if l_g_style is not None:
340
- l_g_total += l_g_style
341
- loss_dict['l_g_style'] = l_g_style
342
-
343
- # gan loss
344
- fake_g_pred = self.net_d(self.output)
345
- l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
346
- l_g_total += l_g_gan
347
- loss_dict['l_g_gan'] = l_g_gan
348
-
349
- # facial component loss
350
- if self.use_facial_disc:
351
- # left eye
352
- fake_left_eye, fake_left_eye_feats = self.net_d_left_eye(self.left_eyes, return_feats=True)
353
- l_g_gan = self.cri_component(fake_left_eye, True, is_disc=False)
354
- l_g_total += l_g_gan
355
- loss_dict['l_g_gan_left_eye'] = l_g_gan
356
- # right eye
357
- fake_right_eye, fake_right_eye_feats = self.net_d_right_eye(self.right_eyes, return_feats=True)
358
- l_g_gan = self.cri_component(fake_right_eye, True, is_disc=False)
359
- l_g_total += l_g_gan
360
- loss_dict['l_g_gan_right_eye'] = l_g_gan
361
- # mouth
362
- fake_mouth, fake_mouth_feats = self.net_d_mouth(self.mouths, return_feats=True)
363
- l_g_gan = self.cri_component(fake_mouth, True, is_disc=False)
364
- l_g_total += l_g_gan
365
- loss_dict['l_g_gan_mouth'] = l_g_gan
366
-
367
- if self.opt['train'].get('comp_style_weight', 0) > 0:
368
- # get gt feat
369
- _, real_left_eye_feats = self.net_d_left_eye(self.left_eyes_gt, return_feats=True)
370
- _, real_right_eye_feats = self.net_d_right_eye(self.right_eyes_gt, return_feats=True)
371
- _, real_mouth_feats = self.net_d_mouth(self.mouths_gt, return_feats=True)
372
-
373
- def _comp_style(feat, feat_gt, criterion):
374
- return criterion(self._gram_mat(feat[0]), self._gram_mat(
375
- feat_gt[0].detach())) * 0.5 + criterion(
376
- self._gram_mat(feat[1]), self._gram_mat(feat_gt[1].detach()))
377
-
378
- # facial component style loss
379
- comp_style_loss = 0
380
- comp_style_loss += _comp_style(fake_left_eye_feats, real_left_eye_feats, self.cri_l1)
381
- comp_style_loss += _comp_style(fake_right_eye_feats, real_right_eye_feats, self.cri_l1)
382
- comp_style_loss += _comp_style(fake_mouth_feats, real_mouth_feats, self.cri_l1)
383
- comp_style_loss = comp_style_loss * self.opt['train']['comp_style_weight']
384
- l_g_total += comp_style_loss
385
- loss_dict['l_g_comp_style_loss'] = comp_style_loss
386
-
387
- # identity loss
388
- if self.use_identity:
389
- identity_weight = self.opt['train']['identity_weight']
390
- # get gray images and resize
391
- out_gray = self.gray_resize_for_identity(self.output)
392
- gt_gray = self.gray_resize_for_identity(self.gt)
393
-
394
- identity_gt = self.network_identity(gt_gray).detach()
395
- identity_out = self.network_identity(out_gray)
396
- l_identity = self.cri_l1(identity_out, identity_gt) * identity_weight
397
- l_g_total += l_identity
398
- loss_dict['l_identity'] = l_identity
399
-
400
- l_g_total.backward()
401
- self.optimizer_g.step()
402
-
403
- # EMA
404
- self.model_ema(decay=0.5**(32 / (10 * 1000)))
405
-
406
- # ----------- optimize net_d ----------- #
407
- for p in self.net_d.parameters():
408
- p.requires_grad = True
409
- self.optimizer_d.zero_grad()
410
- if self.use_facial_disc:
411
- for p in self.net_d_left_eye.parameters():
412
- p.requires_grad = True
413
- for p in self.net_d_right_eye.parameters():
414
- p.requires_grad = True
415
- for p in self.net_d_mouth.parameters():
416
- p.requires_grad = True
417
- self.optimizer_d_left_eye.zero_grad()
418
- self.optimizer_d_right_eye.zero_grad()
419
- self.optimizer_d_mouth.zero_grad()
420
-
421
- fake_d_pred = self.net_d(self.output.detach())
422
- real_d_pred = self.net_d(self.gt)
423
- l_d = self.cri_gan(real_d_pred, True, is_disc=True) + self.cri_gan(fake_d_pred, False, is_disc=True)
424
- loss_dict['l_d'] = l_d
425
- # In WGAN, real_score should be positive and fake_score should be negative
426
- loss_dict['real_score'] = real_d_pred.detach().mean()
427
- loss_dict['fake_score'] = fake_d_pred.detach().mean()
428
- l_d.backward()
429
-
430
- # regularization loss
431
- if current_iter % self.net_d_reg_every == 0:
432
- self.gt.requires_grad = True
433
- real_pred = self.net_d(self.gt)
434
- l_d_r1 = r1_penalty(real_pred, self.gt)
435
- l_d_r1 = (self.r1_reg_weight / 2 * l_d_r1 * self.net_d_reg_every + 0 * real_pred[0])
436
- loss_dict['l_d_r1'] = l_d_r1.detach().mean()
437
- l_d_r1.backward()
438
-
439
- self.optimizer_d.step()
440
-
441
- # optimize facial component discriminators
442
- if self.use_facial_disc:
443
- # left eye
444
- fake_d_pred, _ = self.net_d_left_eye(self.left_eyes.detach())
445
- real_d_pred, _ = self.net_d_left_eye(self.left_eyes_gt)
446
- l_d_left_eye = self.cri_component(
447
- real_d_pred, True, is_disc=True) + self.cri_gan(
448
- fake_d_pred, False, is_disc=True)
449
- loss_dict['l_d_left_eye'] = l_d_left_eye
450
- l_d_left_eye.backward()
451
- # right eye
452
- fake_d_pred, _ = self.net_d_right_eye(self.right_eyes.detach())
453
- real_d_pred, _ = self.net_d_right_eye(self.right_eyes_gt)
454
- l_d_right_eye = self.cri_component(
455
- real_d_pred, True, is_disc=True) + self.cri_gan(
456
- fake_d_pred, False, is_disc=True)
457
- loss_dict['l_d_right_eye'] = l_d_right_eye
458
- l_d_right_eye.backward()
459
- # mouth
460
- fake_d_pred, _ = self.net_d_mouth(self.mouths.detach())
461
- real_d_pred, _ = self.net_d_mouth(self.mouths_gt)
462
- l_d_mouth = self.cri_component(
463
- real_d_pred, True, is_disc=True) + self.cri_gan(
464
- fake_d_pred, False, is_disc=True)
465
- loss_dict['l_d_mouth'] = l_d_mouth
466
- l_d_mouth.backward()
467
-
468
- self.optimizer_d_left_eye.step()
469
- self.optimizer_d_right_eye.step()
470
- self.optimizer_d_mouth.step()
471
-
472
- self.log_dict = self.reduce_loss_dict(loss_dict)
473
-
474
- def test(self):
475
- with torch.no_grad():
476
- if hasattr(self, 'net_g_ema'):
477
- self.net_g_ema.eval()
478
- self.output, _ = self.net_g_ema(self.lq)
479
- else:
480
- logger = get_root_logger()
481
- logger.warning('Do not have self.net_g_ema, use self.net_g.')
482
- self.net_g.eval()
483
- self.output, _ = self.net_g(self.lq)
484
- self.net_g.train()
485
-
486
- def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
487
- if self.opt['rank'] == 0:
488
- self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
489
-
490
- def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
491
- dataset_name = dataloader.dataset.opt['name']
492
- with_metrics = self.opt['val'].get('metrics') is not None
493
- use_pbar = self.opt['val'].get('pbar', False)
494
-
495
- if with_metrics:
496
- if not hasattr(self, 'metric_results'): # only execute in the first run
497
- self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()}
498
- # initialize the best metric results for each dataset_name (supporting multiple validation datasets)
499
- self._initialize_best_metric_results(dataset_name)
500
- # zero self.metric_results
501
- self.metric_results = {metric: 0 for metric in self.metric_results}
502
-
503
- metric_data = dict()
504
- if use_pbar:
505
- pbar = tqdm(total=len(dataloader), unit='image')
506
-
507
- for idx, val_data in enumerate(dataloader):
508
- img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
509
- self.feed_data(val_data)
510
- self.test()
511
-
512
- sr_img = tensor2img(self.output.detach().cpu(), min_max=(-1, 1))
513
- metric_data['img'] = sr_img
514
- if hasattr(self, 'gt'):
515
- gt_img = tensor2img(self.gt.detach().cpu(), min_max=(-1, 1))
516
- metric_data['img2'] = gt_img
517
- del self.gt
518
-
519
- # tentative for out of GPU memory
520
- del self.lq
521
- del self.output
522
- torch.cuda.empty_cache()
523
-
524
- if save_img:
525
- if self.opt['is_train']:
526
- save_img_path = osp.join(self.opt['path']['visualization'], img_name,
527
- f'{img_name}_{current_iter}.png')
528
- else:
529
- if self.opt['val']['suffix']:
530
- save_img_path = osp.join(self.opt['path']['visualization'], dataset_name,
531
- f'{img_name}_{self.opt["val"]["suffix"]}.png')
532
- else:
533
- save_img_path = osp.join(self.opt['path']['visualization'], dataset_name,
534
- f'{img_name}_{self.opt["name"]}.png')
535
- imwrite(sr_img, save_img_path)
536
-
537
- if with_metrics:
538
- # calculate metrics
539
- for name, opt_ in self.opt['val']['metrics'].items():
540
- self.metric_results[name] += calculate_metric(metric_data, opt_)
541
- if use_pbar:
542
- pbar.update(1)
543
- pbar.set_description(f'Test {img_name}')
544
- if use_pbar:
545
- pbar.close()
546
-
547
- if with_metrics:
548
- for metric in self.metric_results.keys():
549
- self.metric_results[metric] /= (idx + 1)
550
- # update the best metric result
551
- self._update_best_metric_result(dataset_name, metric, self.metric_results[metric], current_iter)
552
-
553
- self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
554
-
555
- def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
556
- log_str = f'Validation {dataset_name}\n'
557
- for metric, value in self.metric_results.items():
558
- log_str += f'\t # {metric}: {value:.4f}'
559
- if hasattr(self, 'best_metric_results'):
560
- log_str += (f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
561
- f'{self.best_metric_results[dataset_name][metric]["iter"]} iter')
562
- log_str += '\n'
563
-
564
- logger = get_root_logger()
565
- logger.info(log_str)
566
- if tb_logger:
567
- for metric, value in self.metric_results.items():
568
- tb_logger.add_scalar(f'metrics/{dataset_name}/{metric}', value, current_iter)
569
-
570
- def save(self, epoch, current_iter):
571
- # save net_g and net_d
572
- self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema'])
573
- self.save_network(self.net_d, 'net_d', current_iter)
574
- # save component discriminators
575
- if self.use_facial_disc:
576
- self.save_network(self.net_d_left_eye, 'net_d_left_eye', current_iter)
577
- self.save_network(self.net_d_right_eye, 'net_d_right_eye', current_iter)
578
- self.save_network(self.net_d_mouth, 'net_d_mouth', current_iter)
579
- # save training state
580
- self.save_training_state(epoch, current_iter)
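
For reference, the facial component style loss above compares Gram matrices of discriminator features for the restored and ground-truth eye/mouth crops, weighted by comp_style_weight. A minimal standalone sketch of that computation (illustrative only; it assumes two feature maps per component, as the facial component discriminators return with return_feats=True):

import torch
import torch.nn.functional as F

def gram_mat(x):
    # Gram matrix of a feature map, normalized by (c * h * w), mirroring _gram_mat above
    n, c, h, w = x.size()
    feats = x.view(n, c, h * w)
    return feats.bmm(feats.transpose(1, 2)) / (c * h * w)

def comp_style(feats, feats_gt, criterion=F.l1_loss):
    # two feature levels per component; the first level is weighted by 0.5, as in _comp_style above
    return criterion(gram_mat(feats[0]), gram_mat(feats_gt[0].detach())) * 0.5 + \
        criterion(gram_mat(feats[1]), gram_mat(feats_gt[1].detach()))

# toy check with random tensors standing in for discriminator features
fake = [torch.rand(2, 64, 40, 40), torch.rand(2, 128, 20, 20)]
real = [torch.rand(2, 64, 40, 40), torch.rand(2, 128, 20, 20)]
print(comp_style(fake, real))
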
gfpgan/train.py DELETED
@@ -1,11 +0,0 @@
1
- # flake8: noqa
2
- import os.path as osp
3
- from basicsr.train import train_pipeline
4
-
5
- import gfpgan.archs
6
- import gfpgan.data
7
- import gfpgan.models
8
-
9
- if __name__ == '__main__':
10
- root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
11
- train_pipeline(root_path)
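
The deleted gfpgan/train.py was only a thin entry point around basicsr's train_pipeline; importing gfpgan.archs, gfpgan.data and gfpgan.models registers the classes referenced by name in the option files. As a hedged usage sketch (the -opt flag follows basicsr's convention; verify against the basicsr version you have installed):

python gfpgan/train.py -opt options/train_gfpgan_v1.yml
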
gfpgan/utils.py DELETED
@@ -1,130 +0,0 @@
1
- import cv2
2
- import os
3
- import torch
4
- from basicsr.utils import img2tensor, tensor2img
5
- from basicsr.utils.download_util import load_file_from_url
6
- from facexlib.utils.face_restoration_helper import FaceRestoreHelper
7
- from torchvision.transforms.functional import normalize
8
-
9
- from gfpgan.archs.gfpganv1_arch import GFPGANv1
10
- from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean
11
-
12
- ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
-
14
-
15
- class GFPGANer():
16
- """Helper for restoration with GFPGAN.
17
-
18
- It will detect and crop faces, and then resize the faces to 512x512.
19
- GFPGAN is used to restore the resized faces.
20
- The background is upsampled with the bg_upsampler.
21
- Finally, the faces will be pasted back to the upsampled background image.
22
-
23
- Args:
24
- model_path (str): The path to the GFPGAN model. It can also be a URL (the model will be downloaded automatically).
25
- upscale (float): The upscale of the final output. Default: 2.
26
- arch (str): The GFPGAN architecture. Option: clean | original. Default: clean.
27
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
28
- bg_upsampler (nn.Module): The upsampler for the background. Default: None.
29
- """
30
-
31
- def __init__(self, model_path, upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=None):
32
- self.upscale = upscale
33
- self.bg_upsampler = bg_upsampler
34
-
35
- # initialize model
36
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
37
- # initialize the GFP-GAN
38
- if arch == 'clean':
39
- self.gfpgan = GFPGANv1Clean(
40
- out_size=512,
41
- num_style_feat=512,
42
- channel_multiplier=channel_multiplier,
43
- decoder_load_path=None,
44
- fix_decoder=False,
45
- num_mlp=8,
46
- input_is_latent=True,
47
- different_w=True,
48
- narrow=1,
49
- sft_half=True)
50
- else:
51
- self.gfpgan = GFPGANv1(
52
- out_size=512,
53
- num_style_feat=512,
54
- channel_multiplier=channel_multiplier,
55
- decoder_load_path=None,
56
- fix_decoder=True,
57
- num_mlp=8,
58
- input_is_latent=True,
59
- different_w=True,
60
- narrow=1,
61
- sft_half=True)
62
- # initialize face helper
63
- self.face_helper = FaceRestoreHelper(
64
- upscale,
65
- face_size=512,
66
- crop_ratio=(1, 1),
67
- det_model='retinaface_resnet50',
68
- save_ext='png',
69
- device=self.device)
70
-
71
- if model_path.startswith('https://'):
72
- model_path = load_file_from_url(
73
- url=model_path, model_dir=os.path.join(ROOT_DIR, 'gfpgan/weights'), progress=True, file_name=None)
74
- loadnet = torch.load(model_path)
75
- if 'params_ema' in loadnet:
76
- keyname = 'params_ema'
77
- else:
78
- keyname = 'params'
79
- self.gfpgan.load_state_dict(loadnet[keyname], strict=True)
80
- self.gfpgan.eval()
81
- self.gfpgan = self.gfpgan.to(self.device)
82
-
83
- @torch.no_grad()
84
- def enhance(self, img, has_aligned=False, only_center_face=False, paste_back=True):
85
- self.face_helper.clean_all()
86
-
87
- if has_aligned: # the inputs are already aligned
88
- img = cv2.resize(img, (512, 512))
89
- self.face_helper.cropped_faces = [img]
90
- else:
91
- self.face_helper.read_image(img)
92
- # get face landmarks for each face
93
- self.face_helper.get_face_landmarks_5(only_center_face=only_center_face, eye_dist_threshold=5)
94
- # eye_dist_threshold=5: skip faces whose eye distance is smaller than 5 pixels
95
- # TODO: even with eye_dist_threshold, it will still introduce wrong detections and restorations.
96
- # align and warp each face
97
- self.face_helper.align_warp_face()
98
-
99
- # face restoration
100
- for cropped_face in self.face_helper.cropped_faces:
101
- # prepare data
102
- cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
103
- normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
104
- cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)
105
-
106
- try:
107
- output = self.gfpgan(cropped_face_t, return_rgb=False)[0]
108
- # convert to image
109
- restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1))
110
- except RuntimeError as error:
111
- print(f'\tFailed inference for GFPGAN: {error}.')
112
- restored_face = cropped_face
113
-
114
- restored_face = restored_face.astype('uint8')
115
- self.face_helper.add_restored_face(restored_face)
116
-
117
- if not has_aligned and paste_back:
118
- # upsample the background
119
- if self.bg_upsampler is not None:
120
- # Now only support RealESRGAN for upsampling background
121
- bg_img = self.bg_upsampler.enhance(img, outscale=self.upscale)[0]
122
- else:
123
- bg_img = None
124
-
125
- self.face_helper.get_inverse_affine(None)
126
- # paste each restored face to the input image
127
- restored_img = self.face_helper.paste_faces_to_input_image(upsample_img=bg_img)
128
- return self.face_helper.cropped_faces, self.face_helper.restored_faces, restored_img
129
- else:
130
- return self.face_helper.cropped_faces, self.face_helper.restored_faces, None
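
A minimal, hedged usage sketch of the GFPGANer helper deleted above (the constructor and enhance signatures mirror the class as written; the checkpoint and image paths are placeholders):

import cv2
from gfpgan import GFPGANer  # class defined in the file deleted above

# hypothetical local checkpoint path; the class also accepts an https:// URL
restorer = GFPGANer(
    model_path='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth',
    upscale=2,
    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None)  # without a background upsampler, only the faces are restored

img = cv2.imread('inputs/whole_imgs/00.jpg', cv2.IMREAD_COLOR)  # hypothetical input image (BGR)
cropped_faces, restored_faces, restored_img = restorer.enhance(
    img, has_aligned=False, only_center_face=False, paste_back=True)
if restored_img is not None:
    cv2.imwrite('results/restored.png', restored_img)
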
gfpgan/weights/README.md DELETED
@@ -1,3 +0,0 @@
1
- # Weights
2
-
3
- Put the downloaded weights to this folder.
inference_gfpgan.py DELETED
@@ -1,116 +0,0 @@
1
- import argparse
2
- import cv2
3
- import glob
4
- import numpy as np
5
- import os
6
- import torch
7
- from basicsr.utils import imwrite
8
-
9
- from gfpgan import GFPGANer
10
-
11
-
12
- def main():
13
- """Inference demo for GFPGAN.
14
- """
15
- parser = argparse.ArgumentParser()
16
- parser.add_argument('--upscale', type=int, default=2, help='The final upsampling scale of the image')
17
- parser.add_argument('--arch', type=str, default='clean', help='The GFPGAN architecture. Option: clean | original')
18
- parser.add_argument('--channel', type=int, default=2, help='Channel multiplier for large networks of StyleGAN2')
19
- parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth')
20
- parser.add_argument('--bg_upsampler', type=str, default='realesrgan', help='background upsampler')
21
- parser.add_argument(
22
- '--bg_tile', type=int, default=400, help='Tile size for background upsampler, 0 for no tile during testing')
23
- parser.add_argument('--test_path', type=str, default='inputs/whole_imgs', help='Input folder')
24
- parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces')
25
- parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face')
26
- parser.add_argument('--aligned', action='store_true', help='Inputs are aligned faces')
27
- parser.add_argument('--paste_back', action='store_false', help='Paste the restored faces back to images')
28
- parser.add_argument('--save_root', type=str, default='results', help='Path to save root')
29
- parser.add_argument(
30
- '--ext',
31
- type=str,
32
- default='auto',
33
- help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
34
- args = parser.parse_args()
35
-
37
- if args.test_path.endswith('/'):
38
- args.test_path = args.test_path[:-1]
39
- os.makedirs(args.save_root, exist_ok=True)
40
-
41
- # background upsampler
42
- if args.bg_upsampler == 'realesrgan':
43
- if not torch.cuda.is_available(): # CPU
44
- import warnings
45
- warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. '
46
- 'If you really want to use it, please modify the corresponding code.')
47
- bg_upsampler = None
48
- else:
49
- from basicsr.archs.rrdbnet_arch import RRDBNet
50
- from realesrgan import RealESRGANer
51
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
52
- bg_upsampler = RealESRGANer(
53
- scale=2,
54
- model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
55
- model=model,
56
- tile=args.bg_tile,
57
- tile_pad=10,
58
- pre_pad=0,
59
- half=True) # need to set False in CPU mode
60
- else:
61
- bg_upsampler = None
62
- # set up GFPGAN restorer
63
- restorer = GFPGANer(
64
- model_path=args.model_path,
65
- upscale=args.upscale,
66
- arch=args.arch,
67
- channel_multiplier=args.channel,
68
- bg_upsampler=bg_upsampler)
69
-
70
- img_list = sorted(glob.glob(os.path.join(args.test_path, '*')))
71
- for img_path in img_list:
72
- # read image
73
- img_name = os.path.basename(img_path)
74
- print(f'Processing {img_name} ...')
75
- basename, ext = os.path.splitext(img_name)
76
- input_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
77
-
78
- # restore faces and background if necessary
79
- cropped_faces, restored_faces, restored_img = restorer.enhance(
80
- input_img, has_aligned=args.aligned, only_center_face=args.only_center_face, paste_back=args.paste_back)
81
-
82
- # save faces
83
- for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)):
84
- # save cropped face
85
- save_crop_path = os.path.join(args.save_root, 'cropped_faces', f'{basename}_{idx:02d}.png')
86
- imwrite(cropped_face, save_crop_path)
87
- # save restored face
88
- if args.suffix is not None:
89
- save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png'
90
- else:
91
- save_face_name = f'{basename}_{idx:02d}.png'
92
- save_restore_path = os.path.join(args.save_root, 'restored_faces', save_face_name)
93
- imwrite(restored_face, save_restore_path)
94
- # save comparison image
95
- cmp_img = np.concatenate((cropped_face, restored_face), axis=1)
96
- imwrite(cmp_img, os.path.join(args.save_root, 'cmp', f'{basename}_{idx:02d}.png'))
97
-
98
- # save restored img
99
- if restored_img is not None:
100
- if args.ext == 'auto':
101
- extension = ext[1:]
102
- else:
103
- extension = args.ext
104
-
105
- if args.suffix is not None:
106
- save_restore_path = os.path.join(args.save_root, 'restored_imgs',
107
- f'{basename}_{args.suffix}.{extension}')
108
- else:
109
- save_restore_path = os.path.join(args.save_root, 'restored_imgs', f'{basename}.{extension}')
110
- imwrite(restored_img, save_restore_path)
111
-
112
- print(f'Results are in the [{args.save_root}] folder.')
113
-
114
-
115
- if __name__ == '__main__':
116
- main()
 
options/train_gfpgan_v1.yml DELETED
@@ -1,216 +0,0 @@
1
- # general settings
2
- name: train_GFPGANv1_512
3
- model_type: GFPGANModel
4
- num_gpu: auto # officially, we use 4 GPUs
5
- manual_seed: 0
6
-
7
- # dataset and data loader settings
8
- datasets:
9
- train:
10
- name: FFHQ
11
- type: FFHQDegradationDataset
12
- # dataroot_gt: datasets/ffhq/ffhq_512.lmdb
13
- dataroot_gt: datasets/ffhq/ffhq_512
14
- io_backend:
15
- # type: lmdb
16
- type: disk
17
-
18
- use_hflip: true
19
- mean: [0.5, 0.5, 0.5]
20
- std: [0.5, 0.5, 0.5]
21
- out_size: 512
22
-
23
- blur_kernel_size: 41
24
- kernel_list: ['iso', 'aniso']
25
- kernel_prob: [0.5, 0.5]
26
- blur_sigma: [0.1, 10]
27
- downsample_range: [0.8, 8]
28
- noise_range: [0, 20]
29
- jpeg_range: [60, 100]
30
-
31
- # color jitter and gray
32
- color_jitter_prob: 0.3
33
- color_jitter_shift: 20
34
- color_jitter_pt_prob: 0.3
35
- gray_prob: 0.01
36
-
37
- # If you do not want colorization, please set
38
- # color_jitter_prob: ~
39
- # color_jitter_pt_prob: ~
40
- # gray_prob: 0.01
41
- # gt_gray: True
42
-
43
- crop_components: true
44
- component_path: experiments/pretrained_models/FFHQ_eye_mouth_landmarks_512.pth
45
- eye_enlarge_ratio: 1.4
46
-
47
- # data loader
48
- use_shuffle: true
49
- num_worker_per_gpu: 6
50
- batch_size_per_gpu: 3
51
- dataset_enlarge_ratio: 1
52
- prefetch_mode: ~
53
-
54
- val:
55
- # Please modify accordingly to use your own validation
56
- # Or comment the val block if do not need validation during training
57
- name: validation
58
- type: PairedImageDataset
59
- dataroot_lq: datasets/faces/validation/input
60
- dataroot_gt: datasets/faces/validation/reference
61
- io_backend:
62
- type: disk
63
- mean: [0.5, 0.5, 0.5]
64
- std: [0.5, 0.5, 0.5]
65
- scale: 1
66
-
67
- # network structures
68
- network_g:
69
- type: GFPGANv1
70
- out_size: 512
71
- num_style_feat: 512
72
- channel_multiplier: 1
73
- resample_kernel: [1, 3, 3, 1]
74
- decoder_load_path: experiments/pretrained_models/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth
75
- fix_decoder: true
76
- num_mlp: 8
77
- lr_mlp: 0.01
78
- input_is_latent: true
79
- different_w: true
80
- narrow: 1
81
- sft_half: true
82
-
83
- network_d:
84
- type: StyleGAN2Discriminator
85
- out_size: 512
86
- channel_multiplier: 1
87
- resample_kernel: [1, 3, 3, 1]
88
-
89
- network_d_left_eye:
90
- type: FacialComponentDiscriminator
91
-
92
- network_d_right_eye:
93
- type: FacialComponentDiscriminator
94
-
95
- network_d_mouth:
96
- type: FacialComponentDiscriminator
97
-
98
- network_identity:
99
- type: ResNetArcFace
100
- block: IRBlock
101
- layers: [2, 2, 2, 2]
102
- use_se: False
103
-
104
- # path
105
- path:
106
- pretrain_network_g: ~
107
- param_key_g: params_ema
108
- strict_load_g: ~
109
- pretrain_network_d: ~
110
- pretrain_network_d_left_eye: ~
111
- pretrain_network_d_right_eye: ~
112
- pretrain_network_d_mouth: ~
113
- pretrain_network_identity: experiments/pretrained_models/arcface_resnet18.pth
114
- # resume
115
- resume_state: ~
116
- ignore_resume_networks: ['network_identity']
117
-
118
- # training settings
119
- train:
120
- optim_g:
121
- type: Adam
122
- lr: !!float 2e-3
123
- optim_d:
124
- type: Adam
125
- lr: !!float 2e-3
126
- optim_component:
127
- type: Adam
128
- lr: !!float 2e-3
129
-
130
- scheduler:
131
- type: MultiStepLR
132
- milestones: [600000, 700000]
133
- gamma: 0.5
134
-
135
- total_iter: 800000
136
- warmup_iter: -1 # no warm up
137
-
138
- # losses
139
- # pixel loss
140
- pixel_opt:
141
- type: L1Loss
142
- loss_weight: !!float 1e-1
143
- reduction: mean
144
- # L1 loss used in pyramid loss, component style loss and identity loss
145
- L1_opt:
146
- type: L1Loss
147
- loss_weight: 1
148
- reduction: mean
149
-
150
- # image pyramid loss
151
- pyramid_loss_weight: 1
152
- remove_pyramid_loss: 50000
153
- # perceptual loss (content and style losses)
154
- perceptual_opt:
155
- type: PerceptualLoss
156
- layer_weights:
157
- # before relu
158
- 'conv1_2': 0.1
159
- 'conv2_2': 0.1
160
- 'conv3_4': 1
161
- 'conv4_4': 1
162
- 'conv5_4': 1
163
- vgg_type: vgg19
164
- use_input_norm: true
165
- perceptual_weight: !!float 1
166
- style_weight: 50
167
- range_norm: true
168
- criterion: l1
169
- # gan loss
170
- gan_opt:
171
- type: GANLoss
172
- gan_type: wgan_softplus
173
- loss_weight: !!float 1e-1
174
- # r1 regularization for discriminator
175
- r1_reg_weight: 10
176
- # facial component loss
177
- gan_component_opt:
178
- type: GANLoss
179
- gan_type: vanilla
180
- real_label_val: 1.0
181
- fake_label_val: 0.0
182
- loss_weight: !!float 1
183
- comp_style_weight: 200
184
- # identity loss
185
- identity_weight: 10
186
-
187
- net_d_iters: 1
188
- net_d_init_iters: 0
189
- net_d_reg_every: 16
190
-
191
- # validation settings
192
- val:
193
- val_freq: !!float 5e3
194
- save_img: true
195
-
196
- metrics:
197
- psnr: # metric name
198
- type: calculate_psnr
199
- crop_border: 0
200
- test_y_channel: false
201
-
202
- # logging settings
203
- logger:
204
- print_freq: 100
205
- save_checkpoint_freq: !!float 5e3
206
- use_tb_logger: true
207
- wandb:
208
- project: ~
209
- resume_id: ~
210
-
211
- # dist training settings
212
- dist_params:
213
- backend: nccl
214
- port: 29500
215
-
216
- find_unused_parameters: true
 
options/train_gfpgan_v1_simple.yml DELETED
@@ -1,182 +0,0 @@
1
- # general settings
2
- name: train_GFPGANv1_512_simple
3
- model_type: GFPGANModel
4
- num_gpu: auto # officially, we use 4 GPUs
5
- manual_seed: 0
6
-
7
- # dataset and data loader settings
8
- datasets:
9
- train:
10
- name: FFHQ
11
- type: FFHQDegradationDataset
12
- # dataroot_gt: datasets/ffhq/ffhq_512.lmdb
13
- dataroot_gt: datasets/ffhq/ffhq_512
14
- io_backend:
15
- # type: lmdb
16
- type: disk
17
-
18
- use_hflip: true
19
- mean: [0.5, 0.5, 0.5]
20
- std: [0.5, 0.5, 0.5]
21
- out_size: 512
22
-
23
- blur_kernel_size: 41
24
- kernel_list: ['iso', 'aniso']
25
- kernel_prob: [0.5, 0.5]
26
- blur_sigma: [0.1, 10]
27
- downsample_range: [0.8, 8]
28
- noise_range: [0, 20]
29
- jpeg_range: [60, 100]
30
-
31
- # color jitter and gray
32
- color_jitter_prob: 0.3
33
- color_jitter_shift: 20
34
- color_jitter_pt_prob: 0.3
35
- gray_prob: 0.01
36
-
37
- # If you do not want colorization, please set
38
- # color_jitter_prob: ~
39
- # color_jitter_pt_prob: ~
40
- # gray_prob: 0.01
41
- # gt_gray: True
42
-
43
- # data loader
44
- use_shuffle: true
45
- num_worker_per_gpu: 6
46
- batch_size_per_gpu: 3
47
- dataset_enlarge_ratio: 1
48
- prefetch_mode: ~
49
-
50
- val:
51
- # Please modify this section to use your own validation set
52
- # Or comment out the val block if you do not need validation during training
53
- name: validation
54
- type: PairedImageDataset
55
- dataroot_lq: datasets/faces/validation/input
56
- dataroot_gt: datasets/faces/validation/reference
57
- io_backend:
58
- type: disk
59
- mean: [0.5, 0.5, 0.5]
60
- std: [0.5, 0.5, 0.5]
61
- scale: 1
62
-
63
- # network structures
64
- network_g:
65
- type: GFPGANv1
66
- out_size: 512
67
- num_style_feat: 512
68
- channel_multiplier: 1
69
- resample_kernel: [1, 3, 3, 1]
70
- decoder_load_path: experiments/pretrained_models/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth
71
- fix_decoder: true
72
- num_mlp: 8
73
- lr_mlp: 0.01
74
- input_is_latent: true
75
- different_w: true
76
- narrow: 1
77
- sft_half: true
78
-
79
- network_d:
80
- type: StyleGAN2Discriminator
81
- out_size: 512
82
- channel_multiplier: 1
83
- resample_kernel: [1, 3, 3, 1]
84
-
85
-
86
- # path
87
- path:
88
- pretrain_network_g: ~
89
- param_key_g: params_ema
90
- strict_load_g: ~
91
- pretrain_network_d: ~
92
- resume_state: ~
93
-
94
- # training settings
95
- train:
96
- optim_g:
97
- type: Adam
98
- lr: !!float 2e-3
99
- optim_d:
100
- type: Adam
101
- lr: !!float 2e-3
102
- optim_component:
103
- type: Adam
104
- lr: !!float 2e-3
105
-
106
- scheduler:
107
- type: MultiStepLR
108
- milestones: [600000, 700000]
109
- gamma: 0.5
110
-
111
- total_iter: 800000
112
- warmup_iter: -1 # no warm up
113
-
114
- # losses
115
- # pixel loss
116
- pixel_opt:
117
- type: L1Loss
118
- loss_weight: !!float 1e-1
119
- reduction: mean
120
- # L1 loss used in pyramid loss, component style loss and identity loss
121
- L1_opt:
122
- type: L1Loss
123
- loss_weight: 1
124
- reduction: mean
125
-
126
- # image pyramid loss
127
- pyramid_loss_weight: 1
128
- remove_pyramid_loss: 50000
129
- # perceptual loss (content and style losses)
130
- perceptual_opt:
131
- type: PerceptualLoss
132
- layer_weights:
133
- # before relu
134
- 'conv1_2': 0.1
135
- 'conv2_2': 0.1
136
- 'conv3_4': 1
137
- 'conv4_4': 1
138
- 'conv5_4': 1
139
- vgg_type: vgg19
140
- use_input_norm: true
141
- perceptual_weight: !!float 1
142
- style_weight: 50
143
- range_norm: true
144
- criterion: l1
145
- # gan loss
146
- gan_opt:
147
- type: GANLoss
148
- gan_type: wgan_softplus
149
- loss_weight: !!float 1e-1
150
- # r1 regularization for discriminator
151
- r1_reg_weight: 10
152
-
153
- net_d_iters: 1
154
- net_d_init_iters: 0
155
- net_d_reg_every: 16
156
-
157
- # validation settings
158
- val:
159
- val_freq: !!float 5e3
160
- save_img: true
161
-
162
- metrics:
163
- psnr: # metric name
164
- type: calculate_psnr
165
- crop_border: 0
166
- test_y_channel: false
167
-
168
- # logging settings
169
- logger:
170
- print_freq: 100
171
- save_checkpoint_freq: !!float 5e3
172
- use_tb_logger: true
173
- wandb:
174
- project: ~
175
- resume_id: ~
176
-
177
- # dist training settings
178
- dist_params:
179
- backend: nccl
180
- port: 29500
181
-
182
- find_unused_parameters: true
packages.txt CHANGED
@@ -1,3 +1,3 @@
1
- ffmpeg
2
- libsm6
3
  libxext6
 
1
+ ffmpeg
2
+ libsm6
3
  libxext6
requirements.txt CHANGED
@@ -1,10 +1,13 @@
1
  torch>=1.7
2
- numpy<1.21 # numba requires numpy<1.21,>=1.17
 
 
 
 
3
  opencv-python
4
  torchvision
5
  scipy
6
  tqdm
7
  lmdb
8
  pyyaml
9
- tb-nightly
10
- yapf
 
1
  torch>=1.7
2
+ basicsr>=1.4.2
3
+ facexlib>=0.2.5
4
+ gfpgan>=1.3.4
5
+ realesrgan>=0.2.5
6
+ numpy
7
  opencv-python
8
  torchvision
9
  scipy
10
  tqdm
11
  lmdb
12
  pyyaml
13
+ yapf
 
scripts/parse_landmark.py DELETED
@@ -1,85 +0,0 @@
1
- import cv2
2
- import json
3
- import numpy as np
4
- import os
5
- import torch
6
- from basicsr.utils import FileClient, imfrombytes
7
- from collections import OrderedDict
8
-
9
- # ---------------------------- This script is used to parse facial landmarks ------------------------------------- #
10
- # Configurations
11
- save_img = False
12
- scale = 0.5 # 0.5 for official FFHQ (512x512), 1 for others
13
- enlarge_ratio = 1.4 # only for eyes
14
- json_path = 'ffhq-dataset-v2.json'
15
- face_path = 'datasets/ffhq/ffhq_512.lmdb'
16
- save_path = './FFHQ_eye_mouth_landmarks_512.pth'
17
-
18
- print('Load JSON metadata...')
19
- # use the official json file in FFHQ dataset
20
- with open(json_path, 'rb') as f:
21
- json_data = json.load(f, object_pairs_hook=OrderedDict)
22
-
23
- print('Open LMDB file...')
24
- # read ffhq images
25
- file_client = FileClient('lmdb', db_paths=face_path)
26
- with open(os.path.join(face_path, 'meta_info.txt')) as fin:
27
- paths = [line.split('.')[0] for line in fin]
28
-
29
- save_dict = {}
30
-
31
- for item_idx, item in enumerate(json_data.values()):
32
- print(f'\r{item_idx} / {len(json_data)}, {item["image"]["file_path"]} ', end='', flush=True)
33
-
34
- # parse landmarks
35
- lm = np.array(item['image']['face_landmarks'])
36
- lm = lm * scale
37
-
38
- item_dict = {}
39
- # get image
40
- if save_img:
41
- img_bytes = file_client.get(paths[item_idx])
42
- img = imfrombytes(img_bytes, float32=True)
43
-
44
- # get landmarks for each component
45
- map_left_eye = list(range(36, 42))
46
- map_right_eye = list(range(42, 48))
47
- map_mouth = list(range(48, 68))
48
-
49
- # eye_left
50
- mean_left_eye = np.mean(lm[map_left_eye], 0) # (x, y)
51
- half_len_left_eye = np.max((np.max(np.max(lm[map_left_eye], 0) - np.min(lm[map_left_eye], 0)) / 2, 16))
52
- item_dict['left_eye'] = [mean_left_eye[0], mean_left_eye[1], half_len_left_eye]
53
- # mean_left_eye[0] = 512 - mean_left_eye[0] # for testing flip
54
- half_len_left_eye *= enlarge_ratio
55
- loc_left_eye = np.hstack((mean_left_eye - half_len_left_eye + 1, mean_left_eye + half_len_left_eye)).astype(int)
56
- if save_img:
57
- eye_left_img = img[loc_left_eye[1]:loc_left_eye[3], loc_left_eye[0]:loc_left_eye[2], :]
58
- cv2.imwrite(f'tmp/{item_idx:08d}_eye_left.png', eye_left_img * 255)
59
-
60
- # eye_right
61
- mean_right_eye = np.mean(lm[map_right_eye], 0)
62
- half_len_right_eye = np.max((np.max(np.max(lm[map_right_eye], 0) - np.min(lm[map_right_eye], 0)) / 2, 16))
63
- item_dict['right_eye'] = [mean_right_eye[0], mean_right_eye[1], half_len_right_eye]
64
- # mean_right_eye[0] = 512 - mean_right_eye[0] # for testing flip
65
- half_len_right_eye *= enlarge_ratio
66
- loc_right_eye = np.hstack(
67
- (mean_right_eye - half_len_right_eye + 1, mean_right_eye + half_len_right_eye)).astype(int)
68
- if save_img:
69
- eye_right_img = img[loc_right_eye[1]:loc_right_eye[3], loc_right_eye[0]:loc_right_eye[2], :]
70
- cv2.imwrite(f'tmp/{item_idx:08d}_eye_right.png', eye_right_img * 255)
71
-
72
- # mouth
73
- mean_mouth = np.mean(lm[map_mouth], 0)
74
- half_len_mouth = np.max((np.max(np.max(lm[map_mouth], 0) - np.min(lm[map_mouth], 0)) / 2, 16))
75
- item_dict['mouth'] = [mean_mouth[0], mean_mouth[1], half_len_mouth]
76
- # mean_mouth[0] = 512 - mean_mouth[0] # for testing flip
77
- loc_mouth = np.hstack((mean_mouth - half_len_mouth + 1, mean_mouth + half_len_mouth)).astype(int)
78
- if save_img:
79
- mouth_img = img[loc_mouth[1]:loc_mouth[3], loc_mouth[0]:loc_mouth[2], :]
80
- cv2.imwrite(f'tmp/{item_idx:08d}_mouth.png', mouth_img * 255)
81
-
82
- save_dict[f'{item_idx:08d}'] = item_dict
83
-
84
- print('Save...')
85
- torch.save(save_dict, save_path)
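
Each entry of the resulting FFHQ_eye_mouth_landmarks_512.pth maps a zero-padded image index to [center_x, center_y, half_len] triples for the eyes and mouth. A small hedged sketch of reading the file back (keys match what the loop above writes):

import torch

landmarks = torch.load('FFHQ_eye_mouth_landmarks_512.pth')
entry = landmarks['00000000']  # one dict per image, keyed by the zero-padded index
for comp in ('left_eye', 'right_eye', 'mouth'):
    cx, cy, half_len = entry[comp]
    print(comp, 'center:', (cx, cy), 'half side length:', half_len)
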
setup.cfg DELETED
@@ -1,33 +0,0 @@
1
- [flake8]
2
- ignore =
3
- # line break before binary operator (W503)
4
- W503,
5
- # line break after binary operator (W504)
6
- W504,
7
- max-line-length=120
8
-
9
- [yapf]
10
- based_on_style = pep8
11
- column_limit = 120
12
- blank_line_before_nested_class_or_def = true
13
- split_before_expression_after_opening_paren = true
14
-
15
- [isort]
16
- line_length = 120
17
- multi_line_output = 0
18
- known_standard_library = pkg_resources,setuptools
19
- known_first_party = gfpgan
20
- known_third_party = basicsr,cv2,facexlib,numpy,pytest,torch,torchvision,tqdm,yaml
21
- no_lines_before = STDLIB,LOCALFOLDER
22
- default_section = THIRDPARTY
23
-
24
- [codespell]
25
- skip = .git,./docs/build
26
- count =
27
- quiet-level = 3
28
-
29
- [aliases]
30
- test=pytest
31
-
32
- [tool:pytest]
33
- addopts=tests/
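
Since the [tool:pytest] section sets addopts=tests/, a bare pytest run from the repository root would have collected the unit tests below, e.g.:

pytest -v
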
setup.py DELETED
@@ -1,107 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- from setuptools import find_packages, setup
4
-
5
- import os
6
- import subprocess
7
- import time
8
-
9
- version_file = 'gfpgan/version.py'
10
-
11
-
12
- def readme():
13
- with open('README.md', encoding='utf-8') as f:
14
- content = f.read()
15
- return content
16
-
17
-
18
- def get_git_hash():
19
-
20
- def _minimal_ext_cmd(cmd):
21
- # construct minimal environment
22
- env = {}
23
- for k in ['SYSTEMROOT', 'PATH', 'HOME']:
24
- v = os.environ.get(k)
25
- if v is not None:
26
- env[k] = v
27
- # LANGUAGE is used on win32
28
- env['LANGUAGE'] = 'C'
29
- env['LANG'] = 'C'
30
- env['LC_ALL'] = 'C'
31
- out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
32
- return out
33
-
34
- try:
35
- out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
36
- sha = out.strip().decode('ascii')
37
- except OSError:
38
- sha = 'unknown'
39
-
40
- return sha
41
-
42
-
43
- def get_hash():
44
- if os.path.exists('.git'):
45
- sha = get_git_hash()[:7]
46
- else:
47
- sha = 'unknown'
48
-
49
- return sha
50
-
51
-
52
- def write_version_py():
53
- content = """# GENERATED VERSION FILE
54
- # TIME: {}
55
- __version__ = '{}'
56
- __gitsha__ = '{}'
57
- version_info = ({})
58
- """
59
- sha = get_hash()
60
- with open('VERSION', 'r') as f:
61
- SHORT_VERSION = f.read().strip()
62
- VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
63
-
64
- version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
65
- with open(version_file, 'w') as f:
66
- f.write(version_file_str)
67
-
68
-
69
- def get_version():
70
- with open(version_file, 'r') as f:
71
- exec(compile(f.read(), version_file, 'exec'))
72
- return locals()['__version__']
73
-
74
-
75
- def get_requirements(filename='requirements.txt'):
76
- here = os.path.dirname(os.path.realpath(__file__))
77
- with open(os.path.join(here, filename), 'r') as f:
78
- requires = [line.replace('\n', '') for line in f.readlines()]
79
- return requires
80
-
81
-
82
- if __name__ == '__main__':
83
- write_version_py()
84
- setup(
85
- name='gfpgan',
86
- version=get_version(),
87
- description='GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration',
88
- long_description=readme(),
89
- long_description_content_type='text/markdown',
90
- author='Xintao Wang',
91
- author_email='xintao.wang@outlook.com',
92
- keywords='computer vision, pytorch, image restoration, super-resolution, face restoration, gan, gfpgan',
93
- url='https://github.com/TencentARC/GFPGAN',
94
- include_package_data=True,
95
- packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
96
- classifiers=[
97
- 'Development Status :: 4 - Beta',
98
- 'License :: OSI Approved :: Apache Software License',
99
- 'Operating System :: OS Independent',
100
- 'Programming Language :: Python :: 3',
101
- 'Programming Language :: Python :: 3.7',
102
- 'Programming Language :: Python :: 3.8',
103
- ],
104
- license='Apache License Version 2.0',
105
- setup_requires=['cython', 'numpy'],
106
- install_requires=get_requirements(),
107
- zip_safe=False)
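
The deleted setup.py first writes gfpgan/version.py (embedding the short version from VERSION and the git hash) and then installs the package with the requirements parsed from requirements.txt. A hedged install sketch from the repository root:

pip install -e .  # editable install; running setup.py regenerates gfpgan/version.py
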
tests/data/ffhq_gt.lmdb/data.mdb DELETED
Binary file (455 kB)
 
tests/data/ffhq_gt.lmdb/lock.mdb DELETED
Binary file (8.19 kB)
 
tests/data/ffhq_gt.lmdb/meta_info.txt DELETED
@@ -1 +0,0 @@
1
- 00000000.png (512,512,3) 1
 
 
tests/data/gt/00000000.png DELETED
Binary file (440 kB)
 
tests/data/test_eye_mouth_landmarks.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:131583fca2cc346652f8754eb3c5a0bdeda808686039ff10ead7a26254b72358
3
- size 943
 
 
 
 
tests/data/test_ffhq_degradation_dataset.yml DELETED
@@ -1,24 +0,0 @@
1
- name: UnitTest
2
- type: FFHQDegradationDataset
3
- dataroot_gt: tests/data/gt
4
- io_backend:
5
- type: disk
6
-
7
- use_hflip: true
8
- mean: [0.5, 0.5, 0.5]
9
- std: [0.5, 0.5, 0.5]
10
- out_size: 512
11
-
12
- blur_kernel_size: 41
13
- kernel_list: ['iso', 'aniso']
14
- kernel_prob: [0.5, 0.5]
15
- blur_sigma: [0.1, 10]
16
- downsample_range: [0.8, 8]
17
- noise_range: [0, 20]
18
- jpeg_range: [60, 100]
19
-
20
- # color jitter and gray
21
- color_jitter_prob: 1
22
- color_jitter_shift: 20
23
- color_jitter_pt_prob: 1
24
- gray_prob: 1
tests/data/test_gfpgan_model.yml DELETED
@@ -1,140 +0,0 @@
1
- num_gpu: 1
2
- manual_seed: 0
3
- is_train: True
4
- dist: False
5
-
6
- # network structures
7
- network_g:
8
- type: GFPGANv1
9
- out_size: 512
10
- num_style_feat: 512
11
- channel_multiplier: 1
12
- resample_kernel: [1, 3, 3, 1]
13
- decoder_load_path: ~
14
- fix_decoder: true
15
- num_mlp: 8
16
- lr_mlp: 0.01
17
- input_is_latent: true
18
- different_w: true
19
- narrow: 0.5
20
- sft_half: true
21
-
22
- network_d:
23
- type: StyleGAN2Discriminator
24
- out_size: 512
25
- channel_multiplier: 1
26
- resample_kernel: [1, 3, 3, 1]
27
-
28
- network_d_left_eye:
29
- type: FacialComponentDiscriminator
30
-
31
- network_d_right_eye:
32
- type: FacialComponentDiscriminator
33
-
34
- network_d_mouth:
35
- type: FacialComponentDiscriminator
36
-
37
- network_identity:
38
- type: ResNetArcFace
39
- block: IRBlock
40
- layers: [2, 2, 2, 2]
41
- use_se: False
42
-
43
- # path
44
- path:
45
- pretrain_network_g: ~
46
- param_key_g: params_ema
47
- strict_load_g: ~
48
- pretrain_network_d: ~
49
- pretrain_network_d_left_eye: ~
50
- pretrain_network_d_right_eye: ~
51
- pretrain_network_d_mouth: ~
52
- pretrain_network_identity: ~
53
- # resume
54
- resume_state: ~
55
- ignore_resume_networks: ['network_identity']
56
-
57
- # training settings
58
- train:
59
- optim_g:
60
- type: Adam
61
- lr: !!float 2e-3
62
- optim_d:
63
- type: Adam
64
- lr: !!float 2e-3
65
- optim_component:
66
- type: Adam
67
- lr: !!float 2e-3
68
-
69
- scheduler:
70
- type: MultiStepLR
71
- milestones: [600000, 700000]
72
- gamma: 0.5
73
-
74
- total_iter: 800000
75
- warmup_iter: -1 # no warm up
76
-
77
- # losses
78
- # pixel loss
79
- pixel_opt:
80
- type: L1Loss
81
- loss_weight: !!float 1e-1
82
- reduction: mean
83
- # L1 loss used in pyramid loss, component style loss and identity loss
84
- L1_opt:
85
- type: L1Loss
86
- loss_weight: 1
87
- reduction: mean
88
-
89
- # image pyramid loss
90
- pyramid_loss_weight: 1
91
- remove_pyramid_loss: 50000
92
- # perceptual loss (content and style losses)
93
- perceptual_opt:
94
- type: PerceptualLoss
95
- layer_weights:
96
- # before relu
97
- 'conv1_2': 0.1
98
- 'conv2_2': 0.1
99
- 'conv3_4': 1
100
- 'conv4_4': 1
101
- 'conv5_4': 1
102
- vgg_type: vgg19
103
- use_input_norm: true
104
- perceptual_weight: !!float 1
105
- style_weight: 50
106
- range_norm: true
107
- criterion: l1
108
- # gan loss
109
- gan_opt:
110
- type: GANLoss
111
- gan_type: wgan_softplus
112
- loss_weight: !!float 1e-1
113
- # r1 regularization for discriminator
114
- r1_reg_weight: 10
115
- # facial component loss
116
- gan_component_opt:
117
- type: GANLoss
118
- gan_type: vanilla
119
- real_label_val: 1.0
120
- fake_label_val: 0.0
121
- loss_weight: !!float 1
122
- comp_style_weight: 200
123
- # identity loss
124
- identity_weight: 10
125
-
126
- net_d_iters: 1
127
- net_d_init_iters: 0
128
- net_d_reg_every: 1
129
-
130
- # validation settings
131
- val:
132
- val_freq: !!float 5e3
133
- save_img: True
134
- use_pbar: True
135
-
136
- metrics:
137
- psnr: # metric name
138
- type: calculate_psnr
139
- crop_border: 0
140
- test_y_channel: false
tests/test_arcface_arch.py DELETED
@@ -1,49 +0,0 @@
1
- import torch
2
-
3
- from gfpgan.archs.arcface_arch import BasicBlock, Bottleneck, ResNetArcFace
4
-
5
-
6
- def test_resnetarcface():
7
- """Test arch: ResNetArcFace."""
8
-
9
- # model init and forward (gpu)
10
- if torch.cuda.is_available():
11
- net = ResNetArcFace(block='IRBlock', layers=(2, 2, 2, 2), use_se=True).cuda().eval()
12
- img = torch.rand((1, 1, 128, 128), dtype=torch.float32).cuda()
13
- output = net(img)
14
- assert output.shape == (1, 512)
15
-
16
- # -------------------- without SE block ----------------------- #
17
- net = ResNetArcFace(block='IRBlock', layers=(2, 2, 2, 2), use_se=False).cuda().eval()
18
- output = net(img)
19
- assert output.shape == (1, 512)
20
-
21
-
22
- def test_basicblock():
23
- """Test the BasicBlock in arcface_arch"""
24
- block = BasicBlock(1, 3, stride=1, downsample=None).cuda()
25
- img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
26
- output = block(img)
27
- assert output.shape == (1, 3, 12, 12)
28
-
29
- # ----------------- use the downsample module --------------- #
30
- downsample = torch.nn.UpsamplingNearest2d(scale_factor=0.5).cuda()
31
- block = BasicBlock(1, 3, stride=2, downsample=downsample).cuda()
32
- img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
33
- output = block(img)
34
- assert output.shape == (1, 3, 6, 6)
35
-
36
-
37
- def test_bottleneck():
38
- """Test the Bottleneck in arcface_arch"""
39
- block = Bottleneck(1, 1, stride=1, downsample=None).cuda()
40
- img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
41
- output = block(img)
42
- assert output.shape == (1, 4, 12, 12)
43
-
44
- # ----------------- use the downsample module --------------- #
45
- downsample = torch.nn.UpsamplingNearest2d(scale_factor=0.5).cuda()
46
- block = Bottleneck(1, 1, stride=2, downsample=downsample).cuda()
47
- img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
48
- output = block(img)
49
- assert output.shape == (1, 4, 6, 6)
 
tests/test_ffhq_degradation_dataset.py DELETED
@@ -1,96 +0,0 @@
- import pytest
- import yaml
-
- from gfpgan.data.ffhq_degradation_dataset import FFHQDegradationDataset
-
-
- def test_ffhq_degradation_dataset():
-
-     with open('tests/data/test_ffhq_degradation_dataset.yml', mode='r') as f:
-         opt = yaml.load(f, Loader=yaml.FullLoader)
-
-     dataset = FFHQDegradationDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'disk'  # io backend
-     assert len(dataset) == 1  # whether to read correct meta info
-     assert dataset.kernel_list == ['iso', 'aniso']  # correct initialization of the degradation configurations
-     assert dataset.color_jitter_prob == 1
-
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 512, 512)
-     assert result['lq'].shape == (3, 512, 512)
-     assert result['gt_path'] == 'tests/data/gt/00000000.png'
-
-     # ------------------ test with probability = 0 -------------------- #
-     opt['color_jitter_prob'] = 0
-     opt['color_jitter_pt_prob'] = 0
-     opt['gray_prob'] = 0
-     opt['io_backend'] = dict(type='disk')
-     dataset = FFHQDegradationDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'disk'  # io backend
-     assert len(dataset) == 1  # whether to read correct meta info
-     assert dataset.kernel_list == ['iso', 'aniso']  # correct initialization of the degradation configurations
-     assert dataset.color_jitter_prob == 0
-
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 512, 512)
-     assert result['lq'].shape == (3, 512, 512)
-     assert result['gt_path'] == 'tests/data/gt/00000000.png'
-
-     # ------------------ test lmdb backend -------------------- #
-     opt['dataroot_gt'] = 'tests/data/ffhq_gt.lmdb'
-     opt['io_backend'] = dict(type='lmdb')
-
-     dataset = FFHQDegradationDataset(opt)
-     assert dataset.io_backend_opt['type'] == 'lmdb'  # io backend
-     assert len(dataset) == 1  # whether to read correct meta info
-     assert dataset.kernel_list == ['iso', 'aniso']  # correct initialization of the degradation configurations
-     assert dataset.color_jitter_prob == 0
-
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 512, 512)
-     assert result['lq'].shape == (3, 512, 512)
-     assert result['gt_path'] == '00000000'
-
-     # ------------------ test with crop_components -------------------- #
-     opt['crop_components'] = True
-     opt['component_path'] = 'tests/data/test_eye_mouth_landmarks.pth'
-     opt['eye_enlarge_ratio'] = 1.4
-     opt['gt_gray'] = True
-     opt['io_backend'] = dict(type='lmdb')
-
-     dataset = FFHQDegradationDataset(opt)
-     assert dataset.crop_components is True
-
-     # test __getitem__
-     result = dataset.__getitem__(0)
-     # check returned keys
-     expected_keys = ['gt', 'lq', 'gt_path', 'loc_left_eye', 'loc_right_eye', 'loc_mouth']
-     assert set(expected_keys).issubset(set(result.keys()))
-     # check shape and contents
-     assert result['gt'].shape == (3, 512, 512)
-     assert result['lq'].shape == (3, 512, 512)
-     assert result['gt_path'] == '00000000'
-     assert result['loc_left_eye'].shape == (4, )
-     assert result['loc_right_eye'].shape == (4, )
-     assert result['loc_mouth'].shape == (4, )
-
-     # ------------------ the lmdb backend should have paths ending with lmdb -------------------- #
-     with pytest.raises(ValueError):
-         opt['dataroot_gt'] = 'tests/data/gt'
-         opt['io_backend'] = dict(type='lmdb')
-         dataset = FFHQDegradationDataset(opt)
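
For reference, a hedged sketch of driving the same dataset through a standard PyTorch DataLoader. The yml path, the option handling, and the expected tensor shapes are taken from the deleted test above; everything else is illustrative.

# Sketch: iterate FFHQDegradationDataset via a DataLoader, using the options
# loaded exactly as in the deleted test (assumes the same test data is present).
import yaml
import torch
from gfpgan.data.ffhq_degradation_dataset import FFHQDegradationDataset

with open('tests/data/test_ffhq_degradation_dataset.yml', mode='r') as f:
    opt = yaml.load(f, Loader=yaml.FullLoader)

dataset = FFHQDegradationDataset(opt)
loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
for batch in loader:
    # per the deleted test, gt and lq are (3, 512, 512) per sample, so (1, 3, 512, 512) here
    print(batch['gt_path'], batch['gt'].shape, batch['lq'].shape)
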
tests/test_gfpgan_arch.py DELETED
@@ -1,203 +0,0 @@
- import torch
-
- from gfpgan.archs.gfpganv1_arch import FacialComponentDiscriminator, GFPGANv1, StyleGAN2GeneratorSFT
- from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean, StyleGAN2GeneratorCSFT
-
-
- def test_stylegan2generatorsft():
-     """Test arch: StyleGAN2GeneratorSFT."""
-
-     # model init and forward (gpu)
-     if torch.cuda.is_available():
-         net = StyleGAN2GeneratorSFT(
-             out_size=32,
-             num_style_feat=512,
-             num_mlp=8,
-             channel_multiplier=1,
-             resample_kernel=(1, 3, 3, 1),
-             lr_mlp=0.01,
-             narrow=1,
-             sft_half=False).cuda().eval()
-         style = torch.rand((1, 512), dtype=torch.float32).cuda()
-         condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
-         condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
-         condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
-         conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
-         output = net([style], conditions)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-         # -------------------- with return_latents ----------------------- #
-         output = net([style], conditions, return_latents=True)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert len(output[1]) == 1
-         # check latent
-         assert output[1][0].shape == (8, 512)
-
-         # -------------------- with randomize_noise = False ----------------------- #
-         output = net([style], conditions, randomize_noise=False)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-         # -------------------- with truncation = 0.5 and mixing ----------------------- #
-         output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-
- def test_gfpganv1():
-     """Test arch: GFPGANv1."""
-
-     # model init and forward (gpu)
-     if torch.cuda.is_available():
-         net = GFPGANv1(
-             out_size=32,
-             num_style_feat=512,
-             channel_multiplier=1,
-             resample_kernel=(1, 3, 3, 1),
-             decoder_load_path=None,
-             fix_decoder=True,
-             # for stylegan decoder
-             num_mlp=8,
-             lr_mlp=0.01,
-             input_is_latent=False,
-             different_w=False,
-             narrow=1,
-             sft_half=True).cuda().eval()
-         img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
-         output = net(img)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert len(output[1]) == 3
-         # check out_rgbs for intermediate loss
-         assert output[1][0].shape == (1, 3, 8, 8)
-         assert output[1][1].shape == (1, 3, 16, 16)
-         assert output[1][2].shape == (1, 3, 32, 32)
-
-         # -------------------- with different_w = True ----------------------- #
-         net = GFPGANv1(
-             out_size=32,
-             num_style_feat=512,
-             channel_multiplier=1,
-             resample_kernel=(1, 3, 3, 1),
-             decoder_load_path=None,
-             fix_decoder=True,
-             # for stylegan decoder
-             num_mlp=8,
-             lr_mlp=0.01,
-             input_is_latent=False,
-             different_w=True,
-             narrow=1,
-             sft_half=True).cuda().eval()
-         img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
-         output = net(img)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert len(output[1]) == 3
-         # check out_rgbs for intermediate loss
-         assert output[1][0].shape == (1, 3, 8, 8)
-         assert output[1][1].shape == (1, 3, 16, 16)
-         assert output[1][2].shape == (1, 3, 32, 32)
-
-
- def test_facialcomponentdiscriminator():
-     """Test arch: FacialComponentDiscriminator."""
-
-     # model init and forward (gpu)
-     if torch.cuda.is_available():
-         net = FacialComponentDiscriminator().cuda().eval()
-         img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
-         output = net(img)
-         assert len(output) == 2
-         assert output[0].shape == (1, 1, 8, 8)
-         assert output[1] is None
-
-         # -------------------- return intermediate features ----------------------- #
-         output = net(img, return_feats=True)
-         assert len(output) == 2
-         assert output[0].shape == (1, 1, 8, 8)
-         assert len(output[1]) == 2
-         assert output[1][0].shape == (1, 128, 16, 16)
-         assert output[1][1].shape == (1, 256, 8, 8)
-
-
- def test_stylegan2generatorcsft():
-     """Test arch: StyleGAN2GeneratorCSFT."""
-
-     # model init and forward (gpu)
-     if torch.cuda.is_available():
-         net = StyleGAN2GeneratorCSFT(
-             out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=1, sft_half=False).cuda().eval()
-         style = torch.rand((1, 512), dtype=torch.float32).cuda()
-         condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
-         condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
-         condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
-         conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
-         output = net([style], conditions)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-         # -------------------- with return_latents ----------------------- #
-         output = net([style], conditions, return_latents=True)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert len(output[1]) == 1
-         # check latent
-         assert output[1][0].shape == (8, 512)
-
-         # -------------------- with randomize_noise = False ----------------------- #
-         output = net([style], conditions, randomize_noise=False)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-         # -------------------- with truncation = 0.5 and mixing ----------------------- #
-         output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-
- def test_gfpganv1clean():
-     """Test arch: GFPGANv1Clean."""
-
-     # model init and forward (gpu)
-     if torch.cuda.is_available():
-         net = GFPGANv1Clean(
-             out_size=32,
-             num_style_feat=512,
-             channel_multiplier=1,
-             decoder_load_path=None,
-             fix_decoder=True,
-             # for stylegan decoder
-             num_mlp=8,
-             input_is_latent=False,
-             different_w=False,
-             narrow=1,
-             sft_half=True).cuda().eval()
-
-         img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
-         output = net(img)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert len(output[1]) == 3
-         # check out_rgbs for intermediate loss
-         assert output[1][0].shape == (1, 3, 8, 8)
-         assert output[1][1].shape == (1, 3, 16, 16)
-         assert output[1][2].shape == (1, 3, 32, 32)
-
-         # -------------------- with different_w = True ----------------------- #
-         net = GFPGANv1Clean(
-             out_size=32,
-             num_style_feat=512,
-             channel_multiplier=1,
-             decoder_load_path=None,
-             fix_decoder=True,
-             # for stylegan decoder
-             num_mlp=8,
-             input_is_latent=False,
-             different_w=True,
-             narrow=1,
-             sft_half=True).cuda().eval()
-         img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
-         output = net(img)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert len(output[1]) == 3
-         # check out_rgbs for intermediate loss
-         assert output[1][0].shape == (1, 3, 8, 8)
-         assert output[1][1].shape == (1, 3, 16, 16)
-         assert output[1][2].shape == (1, 3, 32, 32)
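
The SFT/CSFT generator tests above pass two random condition maps per resolution, from 8x8 up to out_size. A small sketch of that pattern for an arbitrary out_size, inferred from the deleted tests rather than from the architecture code.

# Sketch: build the random `conditions` list used by the SFT/CSFT tests above
# (two conditions per resolution from 8x8 up to out_size).
import math
import torch

def make_test_conditions(out_size=32, channels=512):
    conditions = []
    for log_size in range(3, int(math.log2(out_size)) + 1):  # 8, 16, ..., out_size
        size = 2 ** log_size
        cond = torch.rand((1, channels, size, size), dtype=torch.float32)
        conditions.extend([cond, cond])
    return conditions

assert len(make_test_conditions(32)) == 6  # matches the six conditions in the deleted tests
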
tests/test_gfpgan_model.py DELETED
@@ -1,132 +0,0 @@
- import tempfile
- import torch
- import yaml
- from basicsr.archs.stylegan2_arch import StyleGAN2Discriminator
- from basicsr.data.paired_image_dataset import PairedImageDataset
- from basicsr.losses.losses import GANLoss, L1Loss, PerceptualLoss
-
- from gfpgan.archs.arcface_arch import ResNetArcFace
- from gfpgan.archs.gfpganv1_arch import FacialComponentDiscriminator, GFPGANv1
- from gfpgan.models.gfpgan_model import GFPGANModel
-
-
- def test_gfpgan_model():
-     with open('tests/data/test_gfpgan_model.yml', mode='r') as f:
-         opt = yaml.load(f, Loader=yaml.FullLoader)
-
-     # build model
-     model = GFPGANModel(opt)
-     # test attributes
-     assert model.__class__.__name__ == 'GFPGANModel'
-     assert isinstance(model.net_g, GFPGANv1)  # generator
-     assert isinstance(model.net_d, StyleGAN2Discriminator)  # discriminator
-     # facial component discriminators
-     assert isinstance(model.net_d_left_eye, FacialComponentDiscriminator)
-     assert isinstance(model.net_d_right_eye, FacialComponentDiscriminator)
-     assert isinstance(model.net_d_mouth, FacialComponentDiscriminator)
-     # identity network
-     assert isinstance(model.network_identity, ResNetArcFace)
-     # losses
-     assert isinstance(model.cri_pix, L1Loss)
-     assert isinstance(model.cri_perceptual, PerceptualLoss)
-     assert isinstance(model.cri_gan, GANLoss)
-     assert isinstance(model.cri_l1, L1Loss)
-     # optimizer
-     assert isinstance(model.optimizers[0], torch.optim.Adam)
-     assert isinstance(model.optimizers[1], torch.optim.Adam)
-
-     # prepare data
-     gt = torch.rand((1, 3, 512, 512), dtype=torch.float32)
-     lq = torch.rand((1, 3, 512, 512), dtype=torch.float32)
-     loc_left_eye = torch.rand((1, 4), dtype=torch.float32)
-     loc_right_eye = torch.rand((1, 4), dtype=torch.float32)
-     loc_mouth = torch.rand((1, 4), dtype=torch.float32)
-     data = dict(gt=gt, lq=lq, loc_left_eye=loc_left_eye, loc_right_eye=loc_right_eye, loc_mouth=loc_mouth)
-     model.feed_data(data)
-     # check data shape
-     assert model.lq.shape == (1, 3, 512, 512)
-     assert model.gt.shape == (1, 3, 512, 512)
-     assert model.loc_left_eyes.shape == (1, 4)
-     assert model.loc_right_eyes.shape == (1, 4)
-     assert model.loc_mouths.shape == (1, 4)
-
-     # ----------------- test optimize_parameters -------------------- #
-     model.feed_data(data)
-     model.optimize_parameters(1)
-     assert model.output.shape == (1, 3, 512, 512)
-     assert isinstance(model.log_dict, dict)
-     # check returned keys
-     expected_keys = [
-         'l_g_pix', 'l_g_percep', 'l_g_style', 'l_g_gan', 'l_g_gan_left_eye', 'l_g_gan_right_eye', 'l_g_gan_mouth',
-         'l_g_comp_style_loss', 'l_identity', 'l_d', 'real_score', 'fake_score', 'l_d_r1', 'l_d_left_eye',
-         'l_d_right_eye', 'l_d_mouth'
-     ]
-     assert set(expected_keys).issubset(set(model.log_dict.keys()))
-
-     # ----------------- remove pyramid_loss_weight -------------------- #
-     model.feed_data(data)
-     model.optimize_parameters(100000)  # larger than remove_pyramid_loss = 50000
-     assert model.output.shape == (1, 3, 512, 512)
-     assert isinstance(model.log_dict, dict)
-     # check returned keys
-     expected_keys = [
-         'l_g_pix', 'l_g_percep', 'l_g_style', 'l_g_gan', 'l_g_gan_left_eye', 'l_g_gan_right_eye', 'l_g_gan_mouth',
-         'l_g_comp_style_loss', 'l_identity', 'l_d', 'real_score', 'fake_score', 'l_d_r1', 'l_d_left_eye',
-         'l_d_right_eye', 'l_d_mouth'
-     ]
-     assert set(expected_keys).issubset(set(model.log_dict.keys()))
-
-     # ----------------- test save -------------------- #
-     with tempfile.TemporaryDirectory() as tmpdir:
-         model.opt['path']['models'] = tmpdir
-         model.opt['path']['training_states'] = tmpdir
-         model.save(0, 1)
-
-     # ----------------- test the test function -------------------- #
-     model.test()
-     assert model.output.shape == (1, 3, 512, 512)
-     # delete net_g_ema
-     model.__delattr__('net_g_ema')
-     model.test()
-     assert model.output.shape == (1, 3, 512, 512)
-     assert model.net_g.training is True  # should be back to training mode after testing
-
-     # ----------------- test nondist_validation -------------------- #
-     # construct dataloader
-     dataset_opt = dict(
-         name='Demo',
-         dataroot_gt='tests/data/gt',
-         dataroot_lq='tests/data/gt',
-         io_backend=dict(type='disk'),
-         scale=4,
-         phase='val')
-     dataset = PairedImageDataset(dataset_opt)
-     dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
-     assert model.is_train is True
-     with tempfile.TemporaryDirectory() as tmpdir:
-         model.opt['path']['visualization'] = tmpdir
-         model.nondist_validation(dataloader, 1, None, save_img=True)
-         assert model.is_train is True
-         # check metric_results
-         assert 'psnr' in model.metric_results
-         assert isinstance(model.metric_results['psnr'], float)
-
-     # validation
-     with tempfile.TemporaryDirectory() as tmpdir:
-         model.opt['is_train'] = False
-         model.opt['val']['suffix'] = 'test'
-         model.opt['path']['visualization'] = tmpdir
-         model.opt['val']['pbar'] = True
-         model.nondist_validation(dataloader, 1, None, save_img=True)
-         # check metric_results
-         assert 'psnr' in model.metric_results
-         assert isinstance(model.metric_results['psnr'], float)
-
-         # if opt['val']['suffix'] is None
-         model.opt['val']['suffix'] = None
-         model.opt['name'] = 'demo'
-         model.opt['path']['visualization'] = tmpdir
-         model.nondist_validation(dataloader, 1, None, save_img=True)
-         # check metric_results
-         assert 'psnr' in model.metric_results
-         assert isinstance(model.metric_results['psnr'], float)
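
As a reading aid for the deleted model test, the expected log keys split into generator-side and discriminator-side entries. The grouping below is inferred from the key names alone, not from the GFPGANModel source.

# Grouping of the expected training-log keys checked above (inferred, illustrative).
GENERATOR_LOSS_KEYS = [
    'l_g_pix', 'l_g_percep', 'l_g_style', 'l_g_gan',
    'l_g_gan_left_eye', 'l_g_gan_right_eye', 'l_g_gan_mouth',
    'l_g_comp_style_loss', 'l_identity',
]
DISCRIMINATOR_LOSS_KEYS = [
    'l_d', 'real_score', 'fake_score', 'l_d_r1',
    'l_d_left_eye', 'l_d_right_eye', 'l_d_mouth',
]
assert len(GENERATOR_LOSS_KEYS) + len(DISCRIMINATOR_LOSS_KEYS) == 16  # all keys in expected_keys
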
tests/test_stylegan2_clean_arch.py DELETED
@@ -1,52 +0,0 @@
- import torch
-
- from gfpgan.archs.stylegan2_clean_arch import StyleGAN2GeneratorClean
-
-
- def test_stylegan2generatorclean():
-     """Test arch: StyleGAN2GeneratorClean."""
-
-     # model init and forward (gpu)
-     if torch.cuda.is_available():
-         net = StyleGAN2GeneratorClean(
-             out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=0.5).cuda().eval()
-         style = torch.rand((1, 512), dtype=torch.float32).cuda()
-         output = net([style], input_is_latent=False)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-         # -------------------- with return_latents ----------------------- #
-         output = net([style], input_is_latent=True, return_latents=True)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert len(output[1]) == 1
-         # check latent
-         assert output[1][0].shape == (8, 512)
-
-         # -------------------- with randomize_noise = False ----------------------- #
-         output = net([style], randomize_noise=False)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-         # -------------------- with truncation = 0.5 and mixing ----------------------- #
-         output = net([style, style], truncation=0.5, truncation_latent=style)
-         assert output[0].shape == (1, 3, 32, 32)
-         assert output[1] is None
-
-         # ------------------ test make_noise ----------------------- #
-         out = net.make_noise()
-         assert len(out) == 7
-         assert out[0].shape == (1, 1, 4, 4)
-         assert out[1].shape == (1, 1, 8, 8)
-         assert out[2].shape == (1, 1, 8, 8)
-         assert out[3].shape == (1, 1, 16, 16)
-         assert out[4].shape == (1, 1, 16, 16)
-         assert out[5].shape == (1, 1, 32, 32)
-         assert out[6].shape == (1, 1, 32, 32)
-
-         # ------------------ test get_latent ----------------------- #
-         out = net.get_latent(style)
-         assert out.shape == (1, 512)
-
-         # ------------------ test mean_latent ----------------------- #
-         out = net.mean_latent(2)
-         assert out.shape == (1, 512)
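
The make_noise assertions above follow the usual StyleGAN2 layout: one 4x4 noise map plus two noise maps per resolution from 8x8 up to out_size. A short sketch of that count, mirroring the deleted assertions rather than the architecture code.

# Sketch: expected noise-map shapes for a StyleGAN2-style generator of a given out_size.
import math

def expected_noise_shapes(out_size=32):
    shapes = [(1, 1, 4, 4)]
    for log_size in range(3, int(math.log2(out_size)) + 1):  # 8, 16, ..., out_size
        size = 2 ** log_size
        shapes.extend([(1, 1, size, size)] * 2)
    return shapes

assert len(expected_noise_shapes(32)) == 7  # matches len(out) == 7 in the deleted test
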
tests/test_utils.py DELETED
@@ -1,43 +0,0 @@
- import cv2
- from facexlib.utils.face_restoration_helper import FaceRestoreHelper
-
- from gfpgan.archs.gfpganv1_arch import GFPGANv1
- from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean
- from gfpgan.utils import GFPGANer
-
-
- def test_gfpganer():
-     # initialize with the clean model
-     restorer = GFPGANer(
-         model_path='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth',
-         upscale=2,
-         arch='clean',
-         channel_multiplier=2,
-         bg_upsampler=None)
-     # test attribute
-     assert isinstance(restorer.gfpgan, GFPGANv1Clean)
-     assert isinstance(restorer.face_helper, FaceRestoreHelper)
-
-     # initialize with the original model
-     restorer = GFPGANer(
-         model_path='experiments/pretrained_models/GFPGANv1.pth',
-         upscale=2,
-         arch='original',
-         channel_multiplier=1,
-         bg_upsampler=None)
-     # test attribute
-     assert isinstance(restorer.gfpgan, GFPGANv1)
-     assert isinstance(restorer.face_helper, FaceRestoreHelper)
-
-     # ------------------ test enhance ---------------- #
-     img = cv2.imread('tests/data/gt/00000000.png', cv2.IMREAD_COLOR)
-     result = restorer.enhance(img, has_aligned=False, paste_back=True)
-     assert result[0][0].shape == (512, 512, 3)
-     assert result[1][0].shape == (512, 512, 3)
-     assert result[2].shape == (1024, 1024, 3)
-
-     # with has_aligned=True
-     result = restorer.enhance(img, has_aligned=True, paste_back=False)
-     assert result[0][0].shape == (512, 512, 3)
-     assert result[1][0].shape == (512, 512, 3)
-     assert result[2] is None
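
Distilled from the deleted utils test, a hedged end-to-end usage sketch of GFPGANer.enhance. The three-element return (here unpacked as cropped faces, restored faces, pasted-back image) and the 2x output size follow the assertions above; the variable names and the output filename are illustrative only.

# Usage sketch based on the deleted test: restore a face image and save the result.
import cv2
from gfpgan.utils import GFPGANer

restorer = GFPGANer(
    model_path='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth',
    upscale=2,
    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None)

img = cv2.imread('tests/data/gt/00000000.png', cv2.IMREAD_COLOR)  # 512x512 BGR per the test data
cropped_faces, restored_faces, restored_img = restorer.enhance(img, has_aligned=False, paste_back=True)
cv2.imwrite('restored_00000000.png', restored_img)  # 1024x1024 with upscale=2, per the assertions above
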