more changes to the third party lib.
This view is limited to 50 files because it contains too many changes.
- MakeItTalk/thirdparty/AdaptiveWingLoss/.gitignore +7 -7
- MakeItTalk/thirdparty/AdaptiveWingLoss/LICENSE +201 -201
- MakeItTalk/thirdparty/AdaptiveWingLoss/README.md +82 -82
- MakeItTalk/thirdparty/AdaptiveWingLoss/core/coord_conv.py +157 -157
- MakeItTalk/thirdparty/AdaptiveWingLoss/core/dataloader.py +368 -368
- MakeItTalk/thirdparty/AdaptiveWingLoss/core/evaler.py +150 -150
- MakeItTalk/thirdparty/AdaptiveWingLoss/core/models.py +228 -228
- MakeItTalk/thirdparty/AdaptiveWingLoss/eval.py +77 -77
- MakeItTalk/thirdparty/AdaptiveWingLoss/requirements.txt +12 -12
- MakeItTalk/thirdparty/AdaptiveWingLoss/scripts/eval_wflw.sh +10 -10
- MakeItTalk/thirdparty/AdaptiveWingLoss/utils/utils.py +354 -354
- MakeItTalk/thirdparty/__pycache__/__init__.cpython-37.pyc +0 -0
- MakeItTalk/thirdparty/face_of_art/CODEOWNERS +1 -1
- MakeItTalk/thirdparty/face_of_art/LICENCE.txt +21 -21
- MakeItTalk/thirdparty/face_of_art/README.md +98 -98
- MakeItTalk/thirdparty/face_of_art/crop_training_set.py +38 -38
- MakeItTalk/thirdparty/face_of_art/data_loading_functions.py +161 -161
- MakeItTalk/thirdparty/face_of_art/deep_heatmaps_model_fusion_net.py +0 -0
- MakeItTalk/thirdparty/face_of_art/deformation_functions.py +386 -386
- MakeItTalk/thirdparty/face_of_art/logging_functions.py +200 -200
- MakeItTalk/thirdparty/face_of_art/menpo_functions.py +299 -299
- MakeItTalk/thirdparty/face_of_art/old/create_artistic_data_in_advance.ipynb +0 -0
- MakeItTalk/thirdparty/face_of_art/old/deep_heatmaps_model_ect.py +544 -544
- MakeItTalk/thirdparty/face_of_art/old/deep_heatmaps_model_primary.py +391 -391
- MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluate_and_compare_multiple_models.py +82 -82
- MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluate_model.py +54 -54
- MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluate_models.py +79 -79
- MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluation_functions.py +299 -299
- MakeItTalk/thirdparty/face_of_art/old/image_utils.py +590 -590
- MakeItTalk/thirdparty/face_of_art/old/load_data_module.ipynb +0 -0
- MakeItTalk/thirdparty/face_of_art/old/main.py +46 -46
- MakeItTalk/thirdparty/face_of_art/old/main_fusion.py +122 -122
- MakeItTalk/thirdparty/face_of_art/old/main_fusion_server.py +92 -92
- MakeItTalk/thirdparty/face_of_art/old/main_primary_server.py +89 -89
- MakeItTalk/thirdparty/face_of_art/old/run_tests_template.py +50 -50
- MakeItTalk/thirdparty/face_of_art/old/temp/Untitled.rtf +6 -6
- MakeItTalk/thirdparty/face_of_art/old/temp/create_art_data.py +131 -131
- MakeItTalk/thirdparty/face_of_art/old/temp/create_art_data_functions.py +317 -317
- MakeItTalk/thirdparty/face_of_art/old/temp/deep_heatmaps_model_primary_net.py +0 -0
- MakeItTalk/thirdparty/face_of_art/old/temp/main_primary.py +121 -121
- MakeItTalk/thirdparty/face_of_art/old/temp/predict_landmarks.py +99 -99
- MakeItTalk/thirdparty/face_of_art/old/temp/run_tests_fusion.py +136 -136
- MakeItTalk/thirdparty/face_of_art/old/temp/run_tests_primary.py +130 -130
- MakeItTalk/thirdparty/face_of_art/ops.py +98 -98
- MakeItTalk/thirdparty/face_of_art/pdm_clm_functions.py +203 -203
- MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_all +0 -0
- MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_jaw +489 -489
- MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_l_brow +489 -489
- MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_l_eye +485 -485
- MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_mouth +489 -489
MakeItTalk/thirdparty/AdaptiveWingLoss/.gitignore
CHANGED
@@ -1,8 +1,8 @@
In this hunk, and in every hunk below, each removed line is re-added with identical text (the commit apparently changes only whitespace or line endings), so each file's content is shown once rather than twice:

# Python generated files
*.pyc

# Project related files
ckpt/*.pth
dataset/*
!dataset/!.py
experiments/*
MakeItTalk/thirdparty/AdaptiveWingLoss/LICENSE
CHANGED
@@ -1,201 +1,201 @@
The standard Apache License 2.0 text (all 201 lines, including the appendix on how to apply the license) is removed and re-added verbatim; the license text itself is unmodified boilerplate and is not repeated here.
MakeItTalk/thirdparty/AdaptiveWingLoss/README.md
CHANGED
@@ -1,82 +1,82 @@

# AdaptiveWingLoss
## [arXiv](https://arxiv.org/abs/1904.07399)
Pytorch Implementation of Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression.

<img src='images/wflw.png' width="1000px">

## Update Logs:
### October 28, 2019
* Pretrained Model and evaluation code on WFLW dataset is released.

## Installation
#### Note: Code was originally developed under Python2.X and Pytorch 0.4. This released version was revisioned from original code and was tested on Python3.5.7 and Pytorch 1.3.0.

Install system requirements:
```
sudo apt-get install python3-dev python3-pip python3-tk libglib2.0-0
```

Install python dependencies:
```
pip3 install -r requirements.txt
```

## Run Evaluation on WFLW dataset
1. Download and process WFLW dataset
    * Download WFLW dataset and annotation from [Here](https://wywu.github.io/projects/LAB/WFLW.html).
    * Unzip WFLW dataset and annotations and move files into ```./dataset``` directory. Your directory should look like this:
        ```
        AdaptiveWingLoss
        └───dataset
           │
           └───WFLW_annotations
           │   └───list_98pt_rect_attr_train_test
           │   │
           │   └───list_98pt_test
           │
           └───WFLW_images
               └───0--Parade
               │
               └───...
        ```
    * Inside ```./dataset``` directory, run:
        ```
        python convert_WFLW.py
        ```
        A new directory ```./dataset/WFLW_test``` should be generated with 2500 processed testing images and corresponding landmarks.

2. Download pretrained model from [Google Drive](https://drive.google.com/file/d/1HZaSjLoorQ4QCEx7PRTxOmg0bBPYSqhH/view?usp=sharing) and put it in ```./ckpt``` directory.

3. Within ```./Scripts``` directory, run following command:
    ```
    sh eval_wflw.sh
    ```

<img src='images/wflw_table.png' width="800px">
*GTBbox indicates the ground truth landmarks are used as bounding box to crop faces.

## Future Plans
- [x] Release evaluation code and pretrained model on WFLW dataset.

- [ ] Release training code on WFLW dataset.

- [ ] Release pretrained model and code on 300W, AFLW and COFW dataset.

- [ ] Replease facial landmark detection API


## Citation
If you find this useful for your research, please cite the following paper.

```
@InProceedings{Wang_2019_ICCV,
author = {Wang, Xinyao and Bo, Liefeng and Fuxin, Li},
title = {Adaptive Wing Loss for Robust Face Alignment via Heatmap Regression},
booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
month = {October},
year = {2019}
}
```

## Acknowledgments
This repository borrows or partially modifies hourglass model and data processing code from [face alignment](https://github.com/1adrianb/face-alignment) and [pose-hg-train](https://github.com/princeton-vl/pose-hg-train).
MakeItTalk/thirdparty/AdaptiveWingLoss/core/coord_conv.py
CHANGED
@@ -1,157 +1,157 @@

import torch
import torch.nn as nn



device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class AddCoordsTh(nn.Module):
    def __init__(self, x_dim=64, y_dim=64, with_r=False, with_boundary=False):
        super(AddCoordsTh, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.with_r = with_r
        self.with_boundary = with_boundary

    def forward(self, input_tensor, heatmap=None):
        """
        input_tensor: (batch, c, x_dim, y_dim)
        """
        batch_size_tensor = input_tensor.shape[0]

        xx_ones = torch.ones([1, self.y_dim], dtype=torch.int32).to(device)
        xx_ones = xx_ones.unsqueeze(-1)

        xx_range = torch.arange(self.x_dim, dtype=torch.int32).unsqueeze(0).to(device)
        xx_range = xx_range.unsqueeze(1)

        xx_channel = torch.matmul(xx_ones.float(), xx_range.float())
        xx_channel = xx_channel.unsqueeze(-1)


        yy_ones = torch.ones([1, self.x_dim], dtype=torch.int32).to(device)
        yy_ones = yy_ones.unsqueeze(1)

        yy_range = torch.arange(self.y_dim, dtype=torch.int32).unsqueeze(0).to(device)
        yy_range = yy_range.unsqueeze(-1)

        yy_channel = torch.matmul(yy_range.float(), yy_ones.float())
        yy_channel = yy_channel.unsqueeze(-1)

        xx_channel = xx_channel.permute(0, 3, 2, 1)
        yy_channel = yy_channel.permute(0, 3, 2, 1)

        xx_channel = xx_channel / (self.x_dim - 1)
        yy_channel = yy_channel / (self.y_dim - 1)

        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1

        xx_channel = xx_channel.repeat(batch_size_tensor, 1, 1, 1)
        yy_channel = yy_channel.repeat(batch_size_tensor, 1, 1, 1)

        if self.with_boundary and type(heatmap) != type(None):
            boundary_channel = torch.clamp(heatmap[:, -1:, :, :],
                                           0.0, 1.0)

            zero_tensor = torch.zeros_like(xx_channel)
            xx_boundary_channel = torch.where(boundary_channel>0.05,
                                              xx_channel, zero_tensor)
            yy_boundary_channel = torch.where(boundary_channel>0.05,
                                              yy_channel, zero_tensor)
        if self.with_boundary and type(heatmap) != type(None):
            xx_boundary_channel = xx_boundary_channel.to(device)
            yy_boundary_channel = yy_boundary_channel.to(device)

        ret = torch.cat([input_tensor, xx_channel, yy_channel], dim=1)


        if self.with_r:
            rr = torch.sqrt(torch.pow(xx_channel, 2) + torch.pow(yy_channel, 2))
            rr = rr / torch.max(rr)
            ret = torch.cat([ret, rr], dim=1)

        if self.with_boundary and type(heatmap) != type(None):
            ret = torch.cat([ret, xx_boundary_channel,
                             yy_boundary_channel], dim=1)
        return ret


class CoordConvTh(nn.Module):
    """CoordConv layer as in the paper."""
    def __init__(self, x_dim, y_dim, with_r, with_boundary,
                 in_channels, first_one=False, *args, **kwargs):
        super(CoordConvTh, self).__init__()
        self.addcoords = AddCoordsTh(x_dim=x_dim, y_dim=y_dim, with_r=with_r,
                                     with_boundary=with_boundary)
        in_channels += 2
        if with_r:
            in_channels += 1
        if with_boundary and not first_one:
            in_channels += 2
        self.conv = nn.Conv2d(in_channels=in_channels, *args, **kwargs)

    def forward(self, input_tensor, heatmap=None):
        ret = self.addcoords(input_tensor, heatmap)
        last_channel = ret[:, -2:, :, :]
        ret = self.conv(ret)
        return ret, last_channel


'''
An alternative implementation for PyTorch with auto-infering the x-y dimensions.
'''
class AddCoords(nn.Module):

    def __init__(self, with_r=False):
        super().__init__()
        self.with_r = with_r

    def forward(self, input_tensor):
        """
        Args:
            input_tensor: shape(batch, channel, x_dim, y_dim)
        """
        batch_size, _, x_dim, y_dim = input_tensor.size()

        xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
        yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)

        xx_channel = xx_channel / (x_dim - 1)
        yy_channel = yy_channel / (y_dim - 1)

        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1

        xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
        yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)

        if input_tensor.is_cuda:
            xx_channel = xx_channel.to(device)
            yy_channel = yy_channel.to(device)

        ret = torch.cat([
            input_tensor,
            xx_channel.type_as(input_tensor),
            yy_channel.type_as(input_tensor)], dim=1)

        if self.with_r:
            rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2))
            if input_tensor.is_cuda:
                rr = rr.to(device)
            ret = torch.cat([ret, rr], dim=1)

        return ret


class CoordConv(nn.Module):

    def __init__(self, in_channels, out_channels, with_r=False, **kwargs):
        super().__init__()
        self.addcoords = AddCoords(with_r=with_r)
        self.conv = nn.Conv2d(in_channels + 2, out_channels, **kwargs)

    def forward(self, x):
        ret = self.addcoords(x)
        ret = self.conv(ret)
        return ret
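For orientation, here is a minimal usage sketch of the classes above. It is not part of this commit: the bare `coord_conv` import path is an assumption (it presumes core/coord_conv.py is on `sys.path`), and the shapes follow directly from the class definitions.

```python
# Hypothetical usage sketch, not part of this commit. Assumes
# core/coord_conv.py is importable as `coord_conv`.
import torch
from coord_conv import AddCoords, CoordConv, CoordConvTh

# Mirror the module's global device so inputs and layers line up.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.randn(2, 3, 64, 64).to(device)  # (batch, channels, H, W)

# AddCoords appends normalized x/y coordinate channels, plus a radius channel
# when with_r=True.
coords = AddCoords(with_r=True)(x)
assert coords.shape == (2, 3 + 2 + 1, 64, 64)

# CoordConv = AddCoords (two extra channels) followed by a plain Conv2d.
conv = CoordConv(in_channels=3, out_channels=8, kernel_size=3, padding=1).to(device)
assert conv(x).shape == (2, 8, 64, 64)

# CoordConvTh fixes the spatial dims up front and also returns the two
# channels it appended last, alongside the convolution output.
conv_th = CoordConvTh(x_dim=64, y_dim=64, with_r=False, with_boundary=False,
                      in_channels=3, first_one=True,
                      out_channels=8, kernel_size=3, padding=1).to(device)
out, coord_channels = conv_th(x)
assert out.shape == (2, 8, 64, 64) and coord_channels.shape == (2, 2, 64, 64)
```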
MakeItTalk/thirdparty/AdaptiveWingLoss/core/dataloader.py
CHANGED
@@ -1,368 +1,368 @@
Only the removed side of this hunk is visible before the view is cut off:

import sys
import os
import random
import glob
import torch
from skimage import io
from skimage import transform as ski_transform
from skimage.color import rgb2gray
import scipy.io as sio
from scipy import interpolate
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torchvision.transforms import Lambda, Compose
from torchvision.transforms.functional import adjust_brightness, adjust_contrast, adjust_saturation, adjust_hue
from utils.utils import cv_crop, cv_rotate, draw_gaussian, transform, power_transform, shuffle_lr, fig2data, generate_weight_map
from PIL import Image
import cv2
import copy
import math
from imgaug import augmenters as iaa


class AddBoundary(object):
    def __init__(self, num_landmarks=68):
        self.num_landmarks = num_landmarks

    def __call__(self, sample):
        landmarks_64 = np.floor(sample['landmarks'] / 4.0)
        if self.num_landmarks == 68:
            boundaries = {}
            boundaries['cheek'] = landmarks_64[0:17]
            boundaries['left_eyebrow'] = landmarks_64[17:22]
            boundaries['right_eyebrow'] = landmarks_64[22:27]
            boundaries['uper_left_eyelid'] = landmarks_64[36:40]
            boundaries['lower_left_eyelid'] = np.array([landmarks_64[i] for i in [36, 41, 40, 39]])
            boundaries['upper_right_eyelid'] = landmarks_64[42:46]
            boundaries['lower_right_eyelid'] = np.array([landmarks_64[i] for i in [42, 47, 46, 45]])
            boundaries['noise'] = landmarks_64[27:31]
            boundaries['noise_bot'] = landmarks_64[31:36]
            boundaries['upper_outer_lip'] = landmarks_64[48:55]
            boundaries['upper_inner_lip'] = np.array([landmarks_64[i] for i in [60, 61, 62, 63, 64]])
            boundaries['lower_outer_lip'] = np.array([landmarks_64[i] for i in [48, 59, 58, 57, 56, 55, 54]])
            boundaries['lower_inner_lip'] = np.array([landmarks_64[i] for i in [60, 67, 66, 65, 64]])
        elif self.num_landmarks == 98:
            boundaries = {}
            boundaries['cheek'] = landmarks_64[0:33]
            boundaries['left_eyebrow'] = landmarks_64[33:38]
            boundaries['right_eyebrow'] = landmarks_64[42:47]
            boundaries['uper_left_eyelid'] = landmarks_64[60:65]
            boundaries['lower_left_eyelid'] = np.array([landmarks_64[i] for i in [60, 67, 66, 65, 64]])
            boundaries['upper_right_eyelid'] = landmarks_64[68:73]
            boundaries['lower_right_eyelid'] = np.array([landmarks_64[i] for i in [68, 75, 74, 73, 72]])
            boundaries['noise'] = landmarks_64[51:55]
            boundaries['noise_bot'] = landmarks_64[55:60]
            boundaries['upper_outer_lip'] = landmarks_64[76:83]
            boundaries['upper_inner_lip'] = np.array([landmarks_64[i] for i in [88, 89, 90, 91, 92]])
            boundaries['lower_outer_lip'] = np.array([landmarks_64[i] for i in [76, 87, 86, 85, 84, 83, 82]])
            boundaries['lower_inner_lip'] = np.array([landmarks_64[i] for i in [88, 95, 94, 93, 92]])
        elif self.num_landmarks == 19:
            boundaries = {}
            boundaries['left_eyebrow'] = landmarks_64[0:3]
            boundaries['right_eyebrow'] = landmarks_64[3:5]
            boundaries['left_eye'] = landmarks_64[6:9]
            boundaries['right_eye'] = landmarks_64[9:12]
            boundaries['noise'] = landmarks_64[12:15]

        elif self.num_landmarks == 29:
            boundaries = {}
            boundaries['upper_left_eyebrow'] = np.stack([
                landmarks_64[0],
                landmarks_64[4],
                landmarks_64[2]
            ], axis=0)
            boundaries['lower_left_eyebrow'] = np.stack([
                landmarks_64[0],
                landmarks_64[5],
                landmarks_64[2]
            ], axis=0)
            boundaries['upper_right_eyebrow'] = np.stack([
                landmarks_64[1],
                landmarks_64[6],
                landmarks_64[3]
            ], axis=0)
            boundaries['lower_right_eyebrow'] = np.stack([
                landmarks_64[1],
                landmarks_64[7],
                landmarks_64[3]
            ], axis=0)
            boundaries['upper_left_eye'] = np.stack([
                landmarks_64[8],
                landmarks_64[12],
                landmarks_64[10]
            ], axis=0)
            boundaries['lower_left_eye'] = np.stack([
                landmarks_64[8],
                landmarks_64[13],
                landmarks_64[10]
            ], axis=0)
            boundaries['upper_right_eye'] = np.stack([
                landmarks_64[9],
                landmarks_64[14],
                landmarks_64[11]
            ], axis=0)
            boundaries['lower_right_eye'] = np.stack([
                landmarks_64[9],
                landmarks_64[15],
                landmarks_64[11]
            ], axis=0)
            boundaries['noise'] = np.stack([
                landmarks_64[18],
                landmarks_64[21],
                landmarks_64[19]
            ], axis=0)
            boundaries['outer_upper_lip'] = np.stack([
                landmarks_64[22],
                landmarks_64[24],
                landmarks_64[23]
            ], axis=0)
            boundaries['inner_upper_lip'] = np.stack([
                landmarks_64[22],
                landmarks_64[25],
                landmarks_64[23]
            ], axis=0)
            boundaries['outer_lower_lip'] = np.stack([
                landmarks_64[22],
                landmarks_64[26],
                landmarks_64[23]
            ], axis=0)
            boundaries['inner_lower_lip'] = np.stack([
                landmarks_64[22],
                landmarks_64[27],
                landmarks_64[23]
            ], axis=0)
        functions = {}

        for key, points in boundaries.items():
            temp = points[0]
            new_points = points[0:1, :]
            for point in points[1:]:
                if point[0] == temp[0] and point[1] == temp[1]:
                    continue
                else:
                    new_points = np.concatenate((new_points, np.expand_dims(point, 0)), axis=0)
                    temp = point
            points = new_points
            if points.shape[0] == 1:
                points = np.concatenate((points, points+0.001), axis=0)
            k = min(4, points.shape[0])
            functions[key] = interpolate.splprep([points[:, 0], points[:, 1]], k=k-1,s=0)

        boundary_map = np.zeros((64, 64))

        fig = plt.figure(figsize=[64/96.0, 64/96.0], dpi=96)

        ax = fig.add_axes([0, 0, 1, 1])

        ax.axis('off')

        ax.imshow(boundary_map, interpolation='nearest', cmap='gray')
        #ax.scatter(landmarks[:, 0], landmarks[:, 1], s=1, marker=',', c='w')

        for key in functions.keys():
            xnew = np.arange(0, 1, 0.01)
            out = interpolate.splev(xnew, functions[key][0], der=0)
            plt.plot(out[0], out[1], ',', linewidth=1, color='w')

        img = fig2data(fig)

        plt.close()

        sigma = 1
        temp = 255-img[:,:,1]
        temp = cv2.distanceTransform(temp, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
        temp = temp.astype(np.float32)
        temp = np.where(temp < 3*sigma, np.exp(-(temp*temp)/(2*sigma*sigma)), 0 )

        fig = plt.figure(figsize=[64/96.0, 64/96.0], dpi=96)

        ax = fig.add_axes([0, 0, 1, 1])

        ax.axis('off')
        ax.imshow(temp, cmap='gray')
        plt.close()

        boundary_map = fig2data(fig)

        sample['boundary'] = boundary_map[:, :, 0]

        return sample

class AddWeightMap(object):
    def __call__(self, sample):
        heatmap= sample['heatmap']
        boundary = sample['boundary']
        heatmap = np.concatenate((heatmap, np.expand_dims(boundary, axis=0)), 0)
        weight_map = np.zeros_like(heatmap)
        for i in range(heatmap.shape[0]):
            weight_map[i] = generate_weight_map(weight_map[i],
                                                heatmap[i])
        sample['weight_map'] = weight_map
        return sample

class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, heatmap, landmarks, boundary, weight_map= sample['image'], sample['heatmap'], sample['landmarks'], sample['boundary'], sample['weight_map']

        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        if len(image.shape) == 2:
            image = np.expand_dims(image, axis=2)
            image_small = np.expand_dims(image_small, axis=2)
        image = image.transpose((2, 0, 1))
        boundary = np.expand_dims(boundary, axis=2)
        boundary = boundary.transpose((2, 0, 1))
        return {'image': torch.from_numpy(image).float().div(255.0),
                'heatmap': torch.from_numpy(heatmap).float(),
                'landmarks': torch.from_numpy(landmarks).float(),
                'boundary': torch.from_numpy(boundary).float().div(255.0),
                'weight_map': torch.from_numpy(weight_map).float()}

class FaceLandmarksDataset(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, img_dir, landmarks_dir, num_landmarks=68, gray_scale=False,
                 detect_face=False, enhance=False, center_shift=0,
                 transform=None,):
        """
        Args:
            landmark_dir (string): Path to the mat file with landmarks saved.
            img_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.img_dir = img_dir
        self.landmarks_dir = landmarks_dir
        self.num_lanmdkars = num_landmarks
        self.transform = transform
        self.img_names = glob.glob(self.img_dir+'*.jpg') + \
                         glob.glob(self.img_dir+'*.png')
        self.gray_scale = gray_scale
        self.detect_face = detect_face
        self.enhance = enhance
        self.center_shift = center_shift
        if self.detect_face:
            self.face_detector = MTCNN(thresh=[0.5, 0.6, 0.7])
    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        img_name = self.img_names[idx]
        pil_image = Image.open(img_name)
        if pil_image.mode != "RGB":
            # if input is grayscale image, convert it to 3 channel image
            if self.enhance:
                pil_image = power_transform(pil_image, 0.5)
            temp_image = Image.new('RGB', pil_image.size)
            temp_image.paste(pil_image)
            pil_image = temp_image
        image = np.array(pil_image)
        if self.gray_scale:
            image = rgb2gray(image)
            image = np.expand_dims(image, axis=2)
            image = np.concatenate((image, image, image), axis=2)
            image = image * 255.0
            image = image.astype(np.uint8)
        if not self.detect_face:
            center = [450//2, 450//2+0]
            if self.center_shift != 0:
                center[0] += int(np.random.uniform(-self.center_shift,
                                                   self.center_shift))
                center[1] += int(np.random.uniform(-self.center_shift,
                                                   self.center_shift))
            scale = 1.8
        else:
            detected_faces = self.face_detector.detect_image(image)
            if len(detected_faces) > 0:
                box = detected_faces[0]
                left, top, right, bottom, _ = box
                center = [right - (right - left) / 2.0,
                          bottom - (bottom - top) / 2.0]
                center[1] = center[1] - (bottom - top) * 0.12
                scale = (right - left + bottom - top) / 195.0
            else:
                center = [450//2, 450//2+0]
                scale = 1.8
            if self.center_shift != 0:
                shift = self.center * self.center_shift / 450
                center[0] += int(np.random.uniform(-shift, shift))
                center[1] += int(np.random.uniform(-shift, shift))
        base_name = os.path.basename(img_name)
        landmarks_base_name = base_name[:-4] + '_pts.mat'
        landmarks_name = os.path.join(self.landmarks_dir, landmarks_base_name)
        if os.path.isfile(landmarks_name):
            mat_data = sio.loadmat(landmarks_name)
            landmarks = mat_data['pts_2d']
        elif os.path.isfile(landmarks_name[:-8] + '.pts.npy'):
            landmarks = np.load(landmarks_name[:-8] + '.pts.npy')
        else:
            landmarks = []
            heatmap = []

        if landmarks != []:
            new_image, new_landmarks = cv_crop(image, landmarks, center,
                                               scale, 256, self.center_shift)
            tries = 0
            while self.center_shift != 0 and tries < 5 and (np.max(new_landmarks) > 240 or np.min(new_landmarks) < 15):
                center = [450//2, 450//2+0]
                scale += 0.05
                center[0] += int(np.random.uniform(-self.center_shift,
                                                   self.center_shift))
                center[1] += int(np.random.uniform(-self.center_shift,
                                                   self.center_shift))

                new_image, new_landmarks = cv_crop(image, landmarks,
                                                   center, scale, 256,
                                                   self.center_shift)
                tries += 1
            if np.max(new_landmarks) > 250 or np.min(new_landmarks) < 5:
                center = [450//2, 450//2+0]
                scale = 2.25
                new_image, new_landmarks = cv_crop(image, landmarks,
                                                   center, scale, 256,
                                                   100)
            assert (np.min(new_landmarks) > 0 and np.max(new_landmarks) < 256), \

(The diff view is truncated here, mid-file, at line 329 of 368.)
|
330 |
-
"Landmarks out of boundary!"
|
331 |
-
image = new_image
|
332 |
-
landmarks = new_landmarks
|
333 |
-
heatmap = np.zeros((self.num_lanmdkars, 64, 64))
|
334 |
-
for i in range(self.num_lanmdkars):
|
335 |
-
if landmarks[i][0] > 0:
|
336 |
-
heatmap[i] = draw_gaussian(heatmap[i], landmarks[i]/4.0+1, 1)
|
337 |
-
sample = {'image': image, 'heatmap': heatmap, 'landmarks': landmarks}
|
338 |
-
if self.transform:
|
339 |
-
sample = self.transform(sample)
|
340 |
-
|
341 |
-
return sample
|
342 |
-
|
343 |
-
def get_dataset(val_img_dir, val_landmarks_dir, batch_size,
|
344 |
-
num_landmarks=68, rotation=0, scale=0,
|
345 |
-
center_shift=0, random_flip=False,
|
346 |
-
brightness=0, contrast=0, saturation=0,
|
347 |
-
blur=False, noise=False, jpeg_effect=False,
|
348 |
-
random_occlusion=False, gray_scale=False,
|
349 |
-
detect_face=False, enhance=False):
|
350 |
-
val_transforms = transforms.Compose([AddBoundary(num_landmarks),
|
351 |
-
AddWeightMap(),
|
352 |
-
ToTensor()])
|
353 |
-
|
354 |
-
val_dataset = FaceLandmarksDataset(val_img_dir, val_landmarks_dir,
|
355 |
-
num_landmarks=num_landmarks,
|
356 |
-
gray_scale=gray_scale,
|
357 |
-
detect_face=detect_face,
|
358 |
-
enhance=enhance,
|
359 |
-
transform=val_transforms)
|
360 |
-
|
361 |
-
val_dataloader = torch.utils.data.DataLoader(val_dataset,
|
362 |
-
batch_size=batch_size,
|
363 |
-
shuffle=False,
|
364 |
-
num_workers=6)
|
365 |
-
data_loaders = {'val': val_dataloader}
|
366 |
-
dataset_sizes = {}
|
367 |
-
dataset_sizes['val'] = len(val_dataset)
|
368 |
-
return data_loaders, dataset_sizes
|
|
|
1 |
+
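For reference, a minimal sketch of driving this validation pipeline, assuming images and 300W-style <name>_pts.mat annotation files already sit on disk; the two directory paths are hypothetical:

    from core.dataloader import get_dataset

    data_loaders, dataset_sizes = get_dataset(
        val_img_dir='dataset/300W_test/images/',           # hypothetical path
        val_landmarks_dir='dataset/300W_test/landmarks/',  # hypothetical path
        batch_size=8,
        num_landmarks=68)

    batch = next(iter(data_loaders['val']))
    # image: (8, 3, 256, 256), heatmap: (8, 68, 64, 64),
    # boundary: (8, 1, 64, 64), weight_map: (8, 69, 64, 64)
    print({k: tuple(v.shape) for k, v in batch.items()})

Note that img_dir is concatenated with '*.jpg' directly in the glob, so the trailing slash on the image directory matters.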
MakeItTalk/thirdparty/AdaptiveWingLoss/core/evaler.py
CHANGED
@@ -1,151 +1,151 @@
import matplotlib
matplotlib.use('Agg')
import math
import torch
import copy
import time
from torch.autograd import Variable
import shutil
from skimage import io
import numpy as np
from utils.utils import fan_NME, show_landmarks, get_preds_fromhm
from PIL import Image, ImageDraw
import os
import sys
import cv2
import matplotlib.pyplot as plt


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def eval_model(model, dataloaders, dataset_sizes,
               writer, use_gpu=True, epochs=5, dataset='val',
               save_path='./', num_landmarks=68):
    global_nme = 0
    model.eval()
    for epoch in range(epochs):
        running_loss = 0
        step = 0
        total_nme = 0
        total_count = 0
        fail_count = 0
        nmes = []
        # running_corrects = 0

        # Iterate over data.
        with torch.no_grad():
            for data in dataloaders[dataset]:
                # note: these two are reset on every batch, so the runtime
                # reported at the end covers only the last batch
                total_runtime = 0
                run_count = 0
                step_start = time.time()
                step += 1
                # get the inputs
                inputs = data['image'].type(torch.FloatTensor)
                labels_heatmap = data['heatmap'].type(torch.FloatTensor)
                labels_boundary = data['boundary'].type(torch.FloatTensor)
                landmarks = data['landmarks'].type(torch.FloatTensor)
                loss_weight_map = data['weight_map'].type(torch.FloatTensor)
                # wrap them in Variable
                if use_gpu:
                    inputs = inputs.to(device)
                    labels_heatmap = labels_heatmap.to(device)
                    labels_boundary = labels_boundary.to(device)
                    loss_weight_map = loss_weight_map.to(device)
                else:
                    inputs, labels_heatmap = Variable(inputs), Variable(labels_heatmap)
                    labels_boundary = Variable(labels_boundary)
                labels = torch.cat((labels_heatmap, labels_boundary), 1)
                single_start = time.time()
                outputs, boundary_channels = model(inputs)
                single_end = time.time()
                total_runtime += time.time() - single_start
                run_count += 1
                step_end = time.time()
                for i in range(inputs.shape[0]):
                    print(inputs.shape)
                    img = inputs[i]
                    img = img.cpu().numpy()
                    img = img.transpose((1, 2, 0))  # *255.0
                    # img = img.astype(np.uint8)
                    # img = Image.fromarray(img)
                    # pred_heatmap = outputs[-1][i].detach().cpu()[:-1, :, :]
                    pred_heatmap = outputs[-1][:, :-1, :, :][i].detach().cpu()
                    pred_landmarks, _ = get_preds_fromhm(pred_heatmap.unsqueeze(0))
                    pred_landmarks = pred_landmarks.squeeze().numpy()

                    gt_landmarks = data['landmarks'][i].numpy()
                    print(pred_landmarks, gt_landmarks)
                    # debug visualization: blocks until 'q' is pressed and
                    # needs a display, so it will not run headless
                    while True:
                        imgshow = vis_landmark_on_img(cv2.UMat(img), pred_landmarks * 4)
                        cv2.imshow('img', imgshow)

                        if cv2.waitKey(10) == ord('q'):
                            break

                    if num_landmarks == 68:
                        left_eye = np.average(gt_landmarks[36:42], axis=0)
                        right_eye = np.average(gt_landmarks[42:48], axis=0)
                        norm_factor = np.linalg.norm(left_eye - right_eye)
                        # norm_factor = np.linalg.norm(gt_landmarks[36] - gt_landmarks[45])
                    elif num_landmarks == 98:
                        norm_factor = np.linalg.norm(gt_landmarks[60] - gt_landmarks[72])
                    elif num_landmarks == 19:
                        left, top = gt_landmarks[-2, :]
                        right, bottom = gt_landmarks[-1, :]
                        norm_factor = math.sqrt(abs(right - left) * abs(top - bottom))
                        gt_landmarks = gt_landmarks[:-2, :]
                    elif num_landmarks == 29:
                        # norm_factor = np.linalg.norm(gt_landmarks[8] - gt_landmarks[9])
                        norm_factor = np.linalg.norm(gt_landmarks[16] - gt_landmarks[17])
                    single_nme = (np.sum(np.linalg.norm(pred_landmarks * 4 - gt_landmarks, axis=1)) / pred_landmarks.shape[0]) / norm_factor

                    nmes.append(single_nme)
                    total_count += 1
                    if single_nme > 0.1:
                        fail_count += 1
                if step % 10 == 0:
                    print('Step {} Time: {:.6f} Input Mean: {:.6f} Output Mean: {:.6f}'.format(
                        step, step_end - step_start,
                        torch.mean(labels),
                        torch.mean(outputs[0])))
                # gt_landmarks = landmarks.numpy()
                # pred_heatmap = outputs[-1].to('cpu').numpy()
                gt_landmarks = landmarks
                batch_nme = fan_NME(outputs[-1][:, :-1, :, :].detach().cpu(), gt_landmarks, num_landmarks)
                # batch_nme = 0
                total_nme += batch_nme
        epoch_nme = total_nme / dataset_sizes['val']
        global_nme += epoch_nme
        nme_save_path = os.path.join(save_path, 'nme_log.npy')
        np.save(nme_save_path, np.array(nmes))
        print('NME: {:.6f} Failure Rate: {:.6f} Total Count: {:.6f} Fail Count: {:.6f}'.format(epoch_nme, fail_count / total_count, total_count, fail_count))
    print('Evaluation done! Average NME: {:.6f}'.format(global_nme / epochs))
    print('Average runtime for a single batch: {:.6f}'.format(total_runtime / run_count))
    return model


def vis_landmark_on_img(img, shape, linewidth=2):
    '''
    Visualize landmarks on an image (98-point WFLW layout).
    '''
    def draw_curve(idx_list, color=(0, 255, 0), loop=False, lineWidth=linewidth):
        for i in idx_list:
            cv2.line(img, (shape[i, 0], shape[i, 1]), (shape[i + 1, 0], shape[i + 1, 1]), color, lineWidth)
        if loop:
            cv2.line(img, (shape[idx_list[0], 0], shape[idx_list[0], 1]),
                     (shape[idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1]), color, lineWidth)

    draw_curve(list(range(0, 32)))  # jaw
    draw_curve(list(range(33, 41)), color=(0, 0, 255), loop=True)  # eyebrows
    draw_curve(list(range(42, 50)), color=(0, 0, 255), loop=True)
    draw_curve(list(range(51, 59)))  # nose
    draw_curve(list(range(60, 67)), loop=True)  # eyes
    draw_curve(list(range(68, 75)), loop=True)
    draw_curve(list(range(76, 87)), loop=True, color=(0, 255, 255))  # mouth
    draw_curve(list(range(88, 95)), loop=True, color=(255, 255, 0))

    return img
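The per-image metric above is the normalized mean error (NME): the mean Euclidean distance between predicted and ground-truth points, divided by a reference length (the distance between the eye centers for 68 points, landmarks 60 and 72 for 98 points). A small self-contained numpy sketch with made-up coordinates:

    import numpy as np

    # toy example: 3 predicted points in 64x64 heatmap coordinates and their
    # ground truth in the 256x256 crop, hence the factor of 4
    pred_landmarks = np.array([[10.0, 12.0], [30.0, 33.0], [50.0, 52.0]])
    gt_landmarks = np.array([[41.0, 49.0], [121.0, 131.0], [201.0, 207.0]])
    norm_factor = 100.0  # stands in for the inter-ocular distance

    per_point = np.linalg.norm(pred_landmarks * 4 - gt_landmarks, axis=1)
    print(per_point.mean() / norm_factor)  # ~0.014, i.e. 1.4% of the reference length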
MakeItTalk/thirdparty/AdaptiveWingLoss/core/models.py
CHANGED
@@ -1,228 +1,228 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from core.coord_conv import CoordConvTh


def conv3x3(in_planes, out_planes, strd=1, padding=1,
            bias=False, dilation=1):
    "3x3 convolution with padding"
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=strd, padding=padding, bias=bias,
                     dilation=dilation)

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        # self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        # self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        # out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        # out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out

class ConvBlock(nn.Module):
    def __init__(self, in_planes, out_planes):
        super(ConvBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, int(out_planes / 2))
        self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
        self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4),
                             padding=1, dilation=1)
        self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
        self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4),
                             padding=1, dilation=1)

        if in_planes != out_planes:
            self.downsample = nn.Sequential(
                nn.BatchNorm2d(in_planes),
                nn.ReLU(True),
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=1, bias=False),
            )
        else:
            self.downsample = None

    def forward(self, x):
        residual = x

        out1 = self.bn1(x)
        out1 = F.relu(out1, True)
        out1 = self.conv1(out1)

        out2 = self.bn2(out1)
        out2 = F.relu(out2, True)
        out2 = self.conv2(out2)

        out3 = self.bn3(out2)
        out3 = F.relu(out3, True)
        out3 = self.conv3(out3)

        out3 = torch.cat((out1, out2, out3), 1)

        if self.downsample is not None:
            residual = self.downsample(residual)

        out3 += residual

        return out3

class HourGlass(nn.Module):
    def __init__(self, num_modules, depth, num_features, first_one=False):
        super(HourGlass, self).__init__()
        self.num_modules = num_modules
        self.depth = depth
        self.features = num_features
        self.coordconv = CoordConvTh(x_dim=64, y_dim=64,
                                     with_r=True, with_boundary=True,
                                     in_channels=256, first_one=first_one,
                                     out_channels=256,
                                     kernel_size=1,
                                     stride=1, padding=0)
        self._generate_network(self.depth)

    def _generate_network(self, level):
        self.add_module('b1_' + str(level), ConvBlock(256, 256))

        self.add_module('b2_' + str(level), ConvBlock(256, 256))

        if level > 1:
            self._generate_network(level - 1)
        else:
            self.add_module('b2_plus_' + str(level), ConvBlock(256, 256))

        self.add_module('b3_' + str(level), ConvBlock(256, 256))

    def _forward(self, level, inp):
        # Upper branch
        up1 = inp
        up1 = self._modules['b1_' + str(level)](up1)

        # Lower branch
        low1 = F.avg_pool2d(inp, 2, stride=2)
        low1 = self._modules['b2_' + str(level)](low1)

        if level > 1:
            low2 = self._forward(level - 1, low1)
        else:
            low2 = low1
            low2 = self._modules['b2_plus_' + str(level)](low2)

        low3 = low2
        low3 = self._modules['b3_' + str(level)](low3)

        # F.upsample is deprecated in newer PyTorch; F.interpolate is the
        # equivalent call there
        up2 = F.upsample(low3, scale_factor=2, mode='nearest')

        return up1 + up2

    def forward(self, x, heatmap):
        x, last_channel = self.coordconv(x, heatmap)
        return self._forward(self.depth, x), last_channel

class FAN(nn.Module):

    def __init__(self, num_modules=1, end_relu=False, gray_scale=False,
                 num_landmarks=68):
        super(FAN, self).__init__()
        self.num_modules = num_modules
        self.gray_scale = gray_scale
        self.end_relu = end_relu
        self.num_landmarks = num_landmarks

        # Base part
        # note: both branches below are identical; grayscale input is handled
        # upstream by replicating the single channel to three channels
        if self.gray_scale:
            self.conv1 = CoordConvTh(x_dim=256, y_dim=256,
                                     with_r=True, with_boundary=False,
                                     in_channels=3, out_channels=64,
                                     kernel_size=7,
                                     stride=2, padding=3)
        else:
            self.conv1 = CoordConvTh(x_dim=256, y_dim=256,
                                     with_r=True, with_boundary=False,
                                     in_channels=3, out_channels=64,
                                     kernel_size=7,
                                     stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = ConvBlock(64, 128)
        self.conv3 = ConvBlock(128, 128)
        self.conv4 = ConvBlock(128, 256)

        # Stacking part
        for hg_module in range(self.num_modules):
            if hg_module == 0:
                first_one = True
            else:
                first_one = False
            self.add_module('m' + str(hg_module), HourGlass(1, 4, 256,
                                                            first_one))
            self.add_module('top_m_' + str(hg_module), ConvBlock(256, 256))
            self.add_module('conv_last' + str(hg_module),
                            nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
            self.add_module('bn_end' + str(hg_module), nn.BatchNorm2d(256))
            self.add_module('l' + str(hg_module), nn.Conv2d(256,
                                                            num_landmarks + 1, kernel_size=1, stride=1, padding=0))

            if hg_module < self.num_modules - 1:
                self.add_module(
                    'bl' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module('al' + str(hg_module), nn.Conv2d(num_landmarks + 1,
                                                                 256, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        x, _ = self.conv1(x)
        x = F.relu(self.bn1(x), True)
        # x = F.relu(self.bn1(self.conv1(x)), True)
        x = F.avg_pool2d(self.conv2(x), 2, stride=2)
        x = self.conv3(x)
        x = self.conv4(x)

        previous = x

        outputs = []
        boundary_channels = []
        tmp_out = None
        for i in range(self.num_modules):
            hg, boundary_channel = self._modules['m' + str(i)](previous,
                                                               tmp_out)

            ll = hg
            ll = self._modules['top_m_' + str(i)](ll)

            ll = F.relu(self._modules['bn_end' + str(i)]
                        (self._modules['conv_last' + str(i)](ll)), True)

            # Predict heatmaps
            tmp_out = self._modules['l' + str(i)](ll)
            if self.end_relu:
                tmp_out = F.relu(tmp_out)  # HACK: Added relu
            outputs.append(tmp_out)
            boundary_channels.append(boundary_channel)

            if i < self.num_modules - 1:
                ll = self._modules['bl' + str(i)](ll)
                tmp_out_ = self._modules['al' + str(i)](tmp_out)
                previous = previous + ll + tmp_out_

        return outputs, boundary_channels
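A quick shape check of the stacked network, on CPU with a random input; the 4-stack, 98-landmark configuration below mirrors the WFLW evaluation script:

    import torch
    from core.models import FAN

    model = FAN(num_modules=4, end_relu=False, gray_scale=False, num_landmarks=98)
    model.eval()
    with torch.no_grad():
        outputs, boundary_channels = model(torch.randn(1, 3, 256, 256))

    # one heatmap tensor per hourglass: 98 landmark channels plus 1 boundary channel
    print(len(outputs), outputs[-1].shape)  # 4 torch.Size([1, 99, 64, 64])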
MakeItTalk/thirdparty/AdaptiveWingLoss/eval.py
CHANGED
@@ -1,77 +1,77 @@
from __future__ import print_function, division
import torch
import argparse
import numpy as np
import torch.nn as nn
import time
import os
from core.evaler import eval_model
from core.dataloader import get_dataset
from core import models
from tensorboardX import SummaryWriter

# Parse arguments
parser = argparse.ArgumentParser()
# Dataset paths
parser.add_argument('--val_img_dir', type=str,
                    help='Validation image directory')
parser.add_argument('--val_landmarks_dir', type=str,
                    help='Validation landmarks directory')
parser.add_argument('--num_landmarks', type=int, default=68,
                    help='Number of landmarks')

# Checkpoint and pretrained weights
parser.add_argument('--ckpt_save_path', type=str,
                    help='a directory to save checkpoint file')
parser.add_argument('--pretrained_weights', type=str,
                    help='a directory to save pretrained_weights')

# Eval options
parser.add_argument('--batch_size', type=int, default=25,
                    help='Batch size for evaluation')  # original help text was a copy-paste leftover about learning-rate decay

# Network parameters
parser.add_argument('--hg_blocks', type=int, default=4,
                    help='Number of HG blocks to stack')
parser.add_argument('--gray_scale', type=str, default="False",
                    help='Whether to convert RGB image into gray scale during training')
parser.add_argument('--end_relu', type=str, default="False",
                    help='Whether to add relu at the end of each HG module')

args = parser.parse_args()

VAL_IMG_DIR = args.val_img_dir
VAL_LANDMARKS_DIR = args.val_landmarks_dir
CKPT_SAVE_PATH = args.ckpt_save_path
BATCH_SIZE = args.batch_size
PRETRAINED_WEIGHTS = args.pretrained_weights
GRAY_SCALE = args.gray_scale != 'False'
HG_BLOCKS = args.hg_blocks
END_RELU = args.end_relu != 'False'
NUM_LANDMARKS = args.num_landmarks

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

writer = SummaryWriter(CKPT_SAVE_PATH)

dataloaders, dataset_sizes = get_dataset(VAL_IMG_DIR, VAL_LANDMARKS_DIR,
                                         BATCH_SIZE, NUM_LANDMARKS)
use_gpu = torch.cuda.is_available()
model_ft = models.FAN(HG_BLOCKS, END_RELU, GRAY_SCALE, NUM_LANDMARKS)

if PRETRAINED_WEIGHTS != "None":
    checkpoint = torch.load(PRETRAINED_WEIGHTS)
    if 'state_dict' not in checkpoint:
        model_ft.load_state_dict(checkpoint)
    else:
        # keep only the checkpoint entries whose names exist in this model,
        # so a checkpoint with extra heads still loads
        pretrained_weights = checkpoint['state_dict']
        model_weights = model_ft.state_dict()
        pretrained_weights = {k: v for k, v in pretrained_weights.items()
                              if k in model_weights}
        model_weights.update(pretrained_weights)
        model_ft.load_state_dict(model_weights)

model_ft = model_ft.to(device)

model_ft = eval_model(model_ft, dataloaders, dataset_sizes, writer, use_gpu, 1, 'val', CKPT_SAVE_PATH, NUM_LANDMARKS)
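One practical note: torch.load above uses the default map_location, so a checkpoint saved from a GPU run will fail to load on a CPU-only machine. A minimal sketch of the CPU-safe variant, using the checkpoint path from the eval_wflw.sh script further down:

    import torch

    checkpoint = torch.load('../ckpt/WFLW_4HG.pth',
                            map_location=torch.device('cpu'))
    # the file may hold either a bare state_dict or a dict wrapping one
    state_dict = checkpoint.get('state_dict', checkpoint)
    print(len(state_dict), 'tensors in checkpoint')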
MakeItTalk/thirdparty/AdaptiveWingLoss/requirements.txt
CHANGED
@@ -1,12 +1,12 @@
opencv-python
scipy>=0.17.0
scikit-image
numpy
matplotlib
Pillow>=4.3.0
imgaug
tensorflow
git+https://github.com/lanpa/tensorboardX
joblib
torch==1.3.0
torchvision==0.4.1
MakeItTalk/thirdparty/AdaptiveWingLoss/scripts/eval_wflw.sh
CHANGED
@@ -1,10 +1,10 @@
CUDA_VISIBLE_DEVICES=1 python ../eval.py \
    --val_img_dir='../dataset/WFLW_test/images/' \
    --val_landmarks_dir='../dataset/WFLW_test/landmarks/' \
    --ckpt_save_path='../experiments/eval_iccv_0620' \
    --hg_blocks=4 \
    --pretrained_weights='../ckpt/WFLW_4HG.pth' \
    --num_landmarks=98 \
    --end_relu='False' \
    --batch_size=20
MakeItTalk/thirdparty/AdaptiveWingLoss/utils/utils.py
CHANGED
from __future__ import print_function, division
import os
import sys
import math
from functools import reduce  # used by transform() below; this import was missing in the original
import torch
import cv2
from PIL import Image
from skimage import io
from skimage import transform as ski_transform
from scipy import ndimage
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils

def _gaussian(
        size=3, sigma=0.25, amplitude=1, normalize=False, width=None,
        height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5,
        mean_vert=0.5):
    # handle some defaults
    if width is None:
        width = size
    if height is None:
        height = size
    if sigma_horz is None:
        sigma_horz = sigma
    if sigma_vert is None:
        sigma_vert = sigma
    center_x = mean_horz * width + 0.5
    center_y = mean_vert * height + 0.5
    gauss = np.empty((height, width), dtype=np.float32)
    # generate kernel
    for i in range(height):
        for j in range(width):
            gauss[i][j] = amplitude * math.exp(-(math.pow((j + 1 - center_x) / (
                sigma_horz * width), 2) / 2.0 + math.pow((i + 1 - center_y) / (sigma_vert * height), 2) / 2.0))
    if normalize:
        gauss = gauss / np.sum(gauss)
    return gauss

def draw_gaussian(image, point, sigma):
    # Check if the gaussian is inside
    ul = [np.floor(np.floor(point[0]) - 3 * sigma),
          np.floor(np.floor(point[1]) - 3 * sigma)]
    br = [np.floor(np.floor(point[0]) + 3 * sigma),
          np.floor(np.floor(point[1]) + 3 * sigma)]
    if (ul[0] > image.shape[1] or ul[1] >
            image.shape[0] or br[0] < 1 or br[1] < 1):
        return image
    size = 6 * sigma + 1
    g = _gaussian(size)
    g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) -
           int(max(1, ul[0])) + int(max(1, -ul[0]))]
    g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) -
           int(max(1, ul[1])) + int(max(1, -ul[1]))]
    img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]
    img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]
    assert (g_x[0] > 0 and g_y[1] > 0)
    correct = False
    while not correct:
        try:
            image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]
                  ] = image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]]
            correct = True
        except Exception:  # was a bare `except:`; log the failing window and recompute it
            print('img_x: {}, img_y: {}, g_x:{}, g_y:{}, point:{}, g_shape:{}, ul:{}, br:{}'.format(
                img_x, img_y, g_x, g_y, point, g.shape, ul, br))
            ul = [np.floor(np.floor(point[0]) - 3 * sigma),
                  np.floor(np.floor(point[1]) - 3 * sigma)]
            br = [np.floor(np.floor(point[0]) + 3 * sigma),
                  np.floor(np.floor(point[1]) + 3 * sigma)]
            g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) -
                   int(max(1, ul[0])) + int(max(1, -ul[0]))]
            g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) -
                   int(max(1, ul[1])) + int(max(1, -ul[1]))]
            img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]
            img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]
    image[image > 1] = 1
    return image

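A minimal sketch of how `draw_gaussian` rasterizes one point into a fresh 64x64 heatmap channel; the landmark value is invented, and the `landmark / 4.0 + 1` scaling mirrors the call inside `cv_rotate` further down this file:

```
import numpy as np

# one 64x64 heatmap channel for a hypothetical landmark at (120, 88)
# in a 256x256 image; heatmaps are 4x smaller than the input
heatmap = np.zeros((64, 64))
landmark = np.array([120.0, 88.0])
heatmap = draw_gaussian(heatmap, landmark / 4.0 + 1, 1)
print(heatmap.max())  # peaks near 1.0 around the landmark position
```
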
def transform(point, center, scale, resolution, rotation=0, invert=False):
    _pt = np.ones(3)
    _pt[0] = point[0]
    _pt[1] = point[1]

    h = 200.0 * scale
    t = np.eye(3)
    t[0, 0] = resolution / h
    t[1, 1] = resolution / h
    t[0, 2] = resolution * (-center[0] / h + 0.5)
    t[1, 2] = resolution * (-center[1] / h + 0.5)

    if rotation != 0:
        rotation = -rotation
        r = np.eye(3)
        ang = rotation * math.pi / 180.0
        s = math.sin(ang)
        c = math.cos(ang)
        r[0][0] = c
        r[0][1] = -s
        r[1][0] = s
        r[1][1] = c

        t_ = np.eye(3)
        t_[0][2] = -resolution / 2.0
        t_[1][2] = -resolution / 2.0
        t_inv = np.eye(3)  # was torch.eye(3) in the original; np.eye keeps the matmul chain in NumPy
        t_inv[0][2] = resolution / 2.0
        t_inv[1][2] = resolution / 2.0
        t = reduce(np.matmul, [t_inv, r, t_, t])

    if invert:
        t = np.linalg.inv(t)
    new_point = (np.matmul(t, _pt))[0:2]

    return new_point.astype(int)

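A hedged usage sketch for `transform` with `invert=True`, mapping a heatmap-grid point back to source-image coordinates; the `center` and `scale` values below are invented for illustration:

```
import numpy as np

center = np.array([128.0, 128.0])  # hypothetical face centre in the source image
scale = 1.28                       # 200 * scale = 256-pixel reference box
hm_point = [32, 32]                # a point on the 64x64 heatmap grid
img_point = transform(hm_point, center, scale, 64, rotation=0, invert=True)
print(img_point)                   # [128 128] -- back at the face centre
```
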
def cv_crop(image, landmarks, center, scale, resolution=256, center_shift=0):
    new_image = cv2.copyMakeBorder(image, center_shift,
                                   center_shift,
                                   center_shift,
                                   center_shift,
                                   cv2.BORDER_CONSTANT, value=[0, 0, 0])
    new_landmarks = landmarks.copy()
    if center_shift != 0:
        center[0] += center_shift
        center[1] += center_shift
        new_landmarks = new_landmarks + center_shift
    length = 200 * scale
    top = int(center[1] - length // 2)
    bottom = int(center[1] + length // 2)
    left = int(center[0] - length // 2)
    right = int(center[0] + length // 2)
    y_pad = abs(min(top, new_image.shape[0] - bottom, 0))
    x_pad = abs(min(left, new_image.shape[1] - right, 0))
    top, bottom, left, right = top + y_pad, bottom + y_pad, left + x_pad, right + x_pad
    new_image = cv2.copyMakeBorder(new_image, y_pad,
                                   y_pad,
                                   x_pad,
                                   x_pad,
                                   cv2.BORDER_CONSTANT, value=[0, 0, 0])
    new_image = new_image[top:bottom, left:right]
    new_image = cv2.resize(new_image, dsize=(int(resolution), int(resolution)),
                           interpolation=cv2.INTER_LINEAR)
    new_landmarks[:, 0] = (new_landmarks[:, 0] + x_pad - left) * resolution / length
    new_landmarks[:, 1] = (new_landmarks[:, 1] + y_pad - top) * resolution / length
    return new_image, new_landmarks

def cv_rotate(image, landmarks, heatmap, rot, scale, resolution=256):
    img_mat = cv2.getRotationMatrix2D((resolution // 2, resolution // 2), rot, scale)
    ones = np.ones(shape=(landmarks.shape[0], 1))
    stacked_landmarks = np.hstack([landmarks, ones])
    new_landmarks = img_mat.dot(stacked_landmarks.T).T
    if np.max(new_landmarks) > 255 or np.min(new_landmarks) < 0:
        return image, landmarks, heatmap
    else:
        new_image = cv2.warpAffine(image, img_mat, (resolution, resolution))
        new_heatmap = heatmap  # pass through unchanged when no heatmap is supplied (the original hit a NameError here)
        if heatmap is not None:
            new_heatmap = np.zeros((heatmap.shape[0], 64, 64))
            for i in range(heatmap.shape[0]):
                if new_landmarks[i][0] > 0:
                    new_heatmap[i] = draw_gaussian(new_heatmap[i],
                                                   new_landmarks[i] / 4.0 + 1, 1)
        return new_image, new_landmarks, new_heatmap

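A small sketch of `cv_crop` on synthetic data: it cuts a `200 * scale` square around `center`, zero-pads where the box leaves the frame, and rescales both pixels and landmarks to `resolution`. All values below are made up:

```
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)      # hypothetical input frame
lms = np.array([[300.0, 200.0], [340.0, 260.0]])   # two made-up landmarks
crop, crop_lms = cv_crop(img, lms, center=np.array([320.0, 240.0]), scale=1.0)
print(crop.shape, crop_lms.shape)                  # (256, 256, 3) (2, 2)
```
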
def show_landmarks(image, heatmap, gt_landmarks, gt_heatmap):
    """Show image with predicted (red) and ground-truth (green) landmarks"""
    pred_landmarks, _ = get_preds_fromhm(torch.from_numpy(heatmap).unsqueeze(0))
    pred_landmarks = pred_landmarks.squeeze() * 4

    image = image.astype(np.uint8)
    # collapse per-landmark gt heatmaps into one map and colour it for overlay
    # (the original computed and normalized this map twice; the first result was discarded)
    heatmap = np.max(gt_heatmap, axis=0)
    heatmap = ski_transform.resize(heatmap, (image.shape[0], image.shape[1]))
    heatmap *= 255
    heatmap = heatmap.astype(np.uint8)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    plt.imshow(image)
    plt.scatter(gt_landmarks[:, 0], gt_landmarks[:, 1], s=0.5, marker='.', c='g')
    plt.scatter(pred_landmarks[:, 0], pred_landmarks[:, 1], s=0.5, marker='.', c='r')
    plt.pause(0.001)  # pause a bit so that plots are updated

def fan_NME(pred_heatmaps, gt_landmarks, num_landmarks=68):
    '''
    Calculate total NME for a batch of data

    Args:
        pred_heatmaps: torch tensor of size [batch, points, height, width]
        gt_landmarks: torch tensor of size [batch, points, 2] holding (x, y)

    Returns:
        nme: sum of nme for this batch
    '''
    nme = 0
    pred_landmarks, _ = get_preds_fromhm(pred_heatmaps)
    pred_landmarks = pred_landmarks.numpy()
    gt_landmarks = gt_landmarks.numpy()
    for i in range(pred_landmarks.shape[0]):
        pred_landmark = pred_landmarks[i] * 4.0
        gt_landmark = gt_landmarks[i]

        if num_landmarks == 68:
            left_eye = np.average(gt_landmark[36:42], axis=0)
            right_eye = np.average(gt_landmark[42:48], axis=0)
            norm_factor = np.linalg.norm(left_eye - right_eye)
            # norm_factor = np.linalg.norm(gt_landmark[36] - gt_landmark[45])
        elif num_landmarks == 98:
            norm_factor = np.linalg.norm(gt_landmark[60] - gt_landmark[72])
        elif num_landmarks == 19:
            left, top = gt_landmark[-2, :]
            right, bottom = gt_landmark[-1, :]
            norm_factor = math.sqrt(abs(right - left) * abs(top - bottom))
            gt_landmark = gt_landmark[:-2, :]
        elif num_landmarks == 29:
            # norm_factor = np.linalg.norm(gt_landmark[8] - gt_landmark[9])
            norm_factor = np.linalg.norm(gt_landmark[16] - gt_landmark[17])
        nme += (np.sum(np.linalg.norm(pred_landmark - gt_landmark, axis=1)) / pred_landmark.shape[0]) / norm_factor
    return nme

def fan_NME_hm(pred_heatmaps, gt_heatmaps, num_landmarks=68):
    '''
    Calculate total NME for a batch of data, with ground truth given as heatmaps

    Args:
        pred_heatmaps: torch tensor of size [batch, points, height, width]
        gt_heatmaps: torch tensor of size [batch, points, height, width]

    Returns:
        nme: sum of nme for this batch
    '''
    nme = 0
    # get_index_fromhm returns a single tensor (the original unpacked two values)
    pred_landmarks = get_index_fromhm(pred_heatmaps)
    pred_landmarks = pred_landmarks.numpy()
    # the original referenced an undefined `gt_landmarks` here; decoding the
    # ground-truth heatmaps the same way is an assumption about the intent
    gt_landmarks = get_index_fromhm(gt_heatmaps).numpy() * 4.0
    for i in range(pred_landmarks.shape[0]):
        pred_landmark = pred_landmarks[i] * 4.0
        gt_landmark = gt_landmarks[i]
        if num_landmarks == 68:
            left_eye = np.average(gt_landmark[36:42], axis=0)
            right_eye = np.average(gt_landmark[42:48], axis=0)
            norm_factor = np.linalg.norm(left_eye - right_eye)
        else:
            norm_factor = np.linalg.norm(gt_landmark[60] - gt_landmark[72])
        nme += (np.sum(np.linalg.norm(pred_landmark - gt_landmark, axis=1)) / pred_landmark.shape[0]) / norm_factor
    return nme

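A sketch of calling `fan_NME` on dummy tensors, purely to show the expected shapes under the 68-point convention; with random inputs the returned value is meaningless:

```
import torch

pred_hm = torch.rand(2, 68, 64, 64)   # fake network output for a batch of 2
gt_lms = torch.rand(2, 68, 2) * 255   # fake ground-truth landmarks in 256px space
batch_nme_sum = fan_NME(pred_hm, gt_lms, num_landmarks=68)
print(batch_nme_sum / 2)              # divide by batch size for the mean NME
```
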
def power_transform(img, power):
    img = np.array(img)
    img_new = np.power((img / 255.0), power) * 255.0
    img_new = img_new.astype(np.uint8)
    img_new = Image.fromarray(img_new)
    return img_new

def get_preds_fromhm(hm, center=None, scale=None, rot=None):
    # 1-based argmax over each flattened heatmap
    # (renamed from `max` to avoid shadowing the builtin)
    max_val, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    idx += 1
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    # quarter-pixel refinement towards the higher neighbouring activation
    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-0.5)

    preds_orig = torch.zeros(preds.size())
    if center is not None and scale is not None:
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                preds_orig[i, j] = transform(
                    preds[i, j], center, scale, hm.size(2), rot, True)

    return preds, preds_orig

def get_index_fromhm(hm):
    max_val, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].remainder_(hm.size(3))
    preds[..., 1].div_(hm.size(2)).floor_()

    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]), int(preds[i, j, 1])
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    return preds

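A quick decoding sketch for `get_preds_fromhm`: plant a synthetic peak, decode it, and scale by 4 to move from the 64x64 heatmap grid to 256x256 input coordinates (the same convention used in `fan_NME` above):

```
import torch

hm = torch.zeros(1, 68, 64, 64)
hm[0, 0, 20, 40] = 1.0      # plant a fake peak for landmark 0 at row 20, col 40
preds, _ = get_preds_fromhm(hm)
print(preds[0, 0] * 4)      # tensor([162., 82.]) -- x, y in input-image space
```
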
def shuffle_lr(parts, num_landmarks=68, pairs=None):
    if num_landmarks == 68:
        if pairs is None:
            pairs = [[0, 16], [1, 15], [2, 14], [3, 13], [4, 12], [5, 11], [6, 10],
                     [7, 9], [17, 26], [18, 25], [19, 24], [20, 23], [21, 22], [36, 45],
                     [37, 44], [38, 43], [39, 42], [41, 46], [40, 47], [31, 35], [32, 34],
                     [50, 52], [49, 53], [48, 54], [61, 63], [60, 64], [67, 65], [59, 55], [58, 56]]
    elif num_landmarks == 98:
        if pairs is None:
            pairs = [[0, 32], [1, 31], [2, 30], [3, 29], [4, 28], [5, 27], [6, 26],
                     [7, 25], [8, 24], [9, 23], [10, 22], [11, 21], [12, 20], [13, 19],
                     [14, 18], [15, 17], [33, 46], [34, 45], [35, 44], [36, 43], [37, 42],
                     [38, 50], [39, 49], [40, 48], [41, 47], [60, 72], [61, 71], [62, 70],
                     [63, 69], [64, 68], [65, 75], [66, 74], [67, 73], [96, 97], [55, 59],
                     [56, 58], [76, 82], [77, 81], [78, 80], [88, 92], [89, 91], [95, 93],
                     [87, 83], [86, 84]]
    elif num_landmarks == 19:
        if pairs is None:
            pairs = [[0, 5], [1, 4], [2, 3], [6, 11], [7, 10], [8, 9], [12, 14], [15, 17]]
    elif num_landmarks == 29:
        if pairs is None:
            pairs = [[0, 1], [4, 6], [5, 7], [2, 3], [8, 9], [12, 14], [16, 17],
                     [13, 15], [10, 11], [18, 19], [22, 23]]
    for matched_p in pairs:
        idx1, idx2 = matched_p[0], matched_p[1]
        tmp = np.copy(parts[idx1])
        np.copyto(parts[idx1], parts[idx2])
        np.copyto(parts[idx2], tmp)
    return parts

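`shuffle_lr` swaps symmetric landmark indices in place, which is how landmark semantics stay consistent under horizontal flips. A minimal sketch with invented 68-point data:

```
import numpy as np

lms = np.arange(68 * 2, dtype=np.float64).reshape(68, 2)  # dummy landmarks
flipped = lms.copy()
flipped[:, 0] = 255 - flipped[:, 0]         # mirror x across a 256px image
flipped = shuffle_lr(flipped, num_landmarks=68)
print(flipped[0], flipped[16])              # jaw endpoints have traded places
```
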
def generate_weight_map(weight_map, heatmap):
    # mark a dilated neighbourhood of each heatmap peak as foreground,
    # as used for the weighted loss mask in Adaptive Wing Loss
    k_size = 3
    dilate = ndimage.grey_dilation(heatmap, size=(k_size, k_size))
    weight_map[np.where(dilate > 0.2)] = 1
    return weight_map

def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a numpy array with RGB channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGB values
    """
    # draw the renderer
    fig.canvas.draw()

    # Get the RGB buffer from the figure; the original used the deprecated
    # np.fromstring and a (w, h, 3) shape, but the buffer is laid out (h, w, 3)
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    buf = buf.reshape(h, w, 3).copy()  # copy: frombuffer returns a read-only view
    return buf

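A hedged sketch combining the helpers in this file: build one synthetic heatmap channel with `draw_gaussian`, then derive the foreground mask that `generate_weight_map` produces (this mirrors the dilated loss-mask idea from the Adaptive Wing Loss paper, but the exact training usage is an assumption here):

```
import numpy as np

hm = np.zeros((64, 64))
hm = draw_gaussian(hm, np.array([32.0, 32.0]), 1)   # one synthetic landmark blob
mask = generate_weight_map(np.zeros_like(hm), hm)
print(mask.sum())  # number of pixels flagged as foreground around the blob
```
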
MakeItTalk/thirdparty/__pycache__/__init__.cpython-37.pyc
CHANGED
Binary files a/MakeItTalk/thirdparty/__pycache__/__init__.cpython-37.pyc and b/MakeItTalk/thirdparty/__pycache__/__init__.cpython-37.pyc differ
MakeItTalk/thirdparty/face_of_art/CODEOWNERS
CHANGED
* @papulke
MakeItTalk/thirdparty/face_of_art/LICENCE.txt
CHANGED
MIT License

Copyright (c) 2019 Jordan Yaniv

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
MakeItTalk/thirdparty/face_of_art/README.md
CHANGED
# The Face of Art: Landmark Detection and Geometric Style in Portraits

Code for the landmark detection framework described in [The Face of Art: Landmark Detection and Geometric Style in Portraits](http://www.faculty.idc.ac.il/arik/site/foa/face-of-art.asp) (SIGGRAPH 2019)

![](old/teaser.png)
<sub><sup>Top: landmark detection results on artistic portraits with different styles allow us to define the geometric style of an artist. Bottom: results of the style transfer of portraits using various artists' geometric style, including Amedeo Modigliani, Pablo Picasso, Margaret Keane, Fernand Léger, and Tsuguharu Foujita. Top right portrait is from 'Woman with Peanuts,' ©1962, Estate of Roy Lichtenstein.</sup></sub>

## Getting Started

### Requirements

* python
* anaconda

### Download

#### Model
Download the model weights from [here](https://www.dropbox.com/sh/hrxcyug1bmbj6cs/AAAxq_zI5eawcLjM8zvUwaXha?dl=0).

#### Datasets
* The datasets used for training and evaluating our model can be found [here](https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/).

* The Artistic-Faces dataset can be found [here](http://www.faculty.idc.ac.il/arik/site/foa/artistic-faces-dataset.asp).

* Training images with texture augmentation can be found [here](https://www.dropbox.com/sh/av2k1i1082z0nie/AAC5qV1E2UkqpDLVsv7TazMta?dl=0).
Before applying texture style transfer, the training images were cropped to the ground-truth face bounding-box with a 25% margin. To crop training images, run the script `crop_training_set.py`.

* Our model expects the following directory structure of landmark detection datasets:
```
landmark_detection_datasets
├── training
├── test
├── challenging
├── common
├── full
├── crop_gt_margin_0.25 (cropped images of training set)
└── crop_gt_margin_0.25_ns (cropped images of training set + texture style transfer)
```
### Install

Create a virtual environment and install the following:
* opencv
* menpo
* menpofit
* tensorflow-gpu

for python 2:
```
conda create -n foa_env python=2.7 anaconda
source activate foa_env
conda install -c menpo opencv
conda install -c menpo menpo
conda install -c menpo menpofit
pip install tensorflow-gpu
```

for python 3:
```
conda create -n foa_env python=3.5 anaconda
source activate foa_env
conda install -c menpo opencv
conda install -c menpo menpo
conda install -c menpo menpofit
pip3 install tensorflow-gpu
```

Clone the repository:

```
git clone https://github.com/papulke/deep_face_heatmaps
```

## Instructions

### Training

To train the network, run `train_heatmaps_network.py`.

Example of training a model with texture augmentation (100% of images) and geometric augmentation (~70% of images):
```
python train_heatmaps_network.py --output_dir='test_artistic_aug' --augment_geom=True \
--augment_texture=True --p_texture=1. --p_geom=0.7
```

### Testing

To use the detection framework to predict landmarks, run the script `predict_landmarks.py`.

## Acknowledgments

* [ect](https://github.com/HongwenZhang/ECT-FaceAlignment)
* [menpo](https://github.com/menpo/menpo)
* [menpofit](https://github.com/menpo/menpofit)
* [mdm](https://github.com/trigeorgis/mdm)
* [style transfer implementation](https://github.com/woodrush/neural-art-tf)
* [painter-by-numbers dataset](https://www.kaggle.com/c/painter-by-numbers/data)
MakeItTalk/thirdparty/face_of_art/crop_training_set.py
CHANGED
from scipy.misc import imsave  # requires an older SciPy; imsave was removed in later releases
from menpo_functions import *
from data_loading_functions import *


# define paths & parameters for cropping dataset
img_dir = '~/landmark_detection_datasets/'
dataset = 'training'
bb_type = 'gt'
margin = 0.25
image_size = 256

# load bounding boxes
bb_dir = os.path.join(img_dir, 'Bounding_Boxes')
bb_dictionary = load_bb_dictionary(bb_dir, mode='TRAIN', test_data=dataset)

# directory for saving face crops
outdir = os.path.join(img_dir, 'crop_' + bb_type + '_margin_' + str(margin))
if not os.path.exists(outdir):
    os.mkdir(outdir)

# load images
imgs_to_crop = load_menpo_image_list(
    img_dir=img_dir, train_crop_dir=None, img_dir_ns=None, mode='TRAIN', bb_dictionary=bb_dictionary,
    image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=False)

# save cropped images with matching landmarks
print("\ncropping dataset from: " + os.path.join(img_dir, dataset))
print("\nsaving cropped dataset to: " + outdir)
for im in imgs_to_crop:
    if im.pixels.shape[0] == 1:
        im_pixels = gray2rgb(np.squeeze(im.pixels))
    else:
        im_pixels = np.rollaxis(im.pixels, 0, 3)
    imsave(os.path.join(outdir, im.path.name.split('.')[0] + '.png'), im_pixels)
    mio.export_landmark_file(im.landmarks['PTS'], os.path.join(outdir, im.path.name.split('.')[0] + '.pts'))

print("\ncropping dataset completed!")
MakeItTalk/thirdparty/face_of_art/data_loading_functions.py
CHANGED
@@ -1,161 +1,161 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import os
|
3 |
-
from skimage.color import gray2rgb
|
4 |
-
|
5 |
-
|
6 |
-
def train_val_shuffle_inds_per_epoch(valid_inds, train_inds, train_iter, batch_size, log_path, save_log=True):
|
7 |
-
"""shuffle image indices for each training epoch and save to log"""
|
8 |
-
|
9 |
-
np.random.seed(0)
|
10 |
-
num_train_images = len(train_inds)
|
11 |
-
num_epochs = int(np.ceil((1. * train_iter) / (1. * num_train_images / batch_size)))+1
|
12 |
-
epoch_inds_shuffle = np.zeros((num_epochs, num_train_images)).astype(int)
|
13 |
-
img_inds = np.arange(num_train_images)
|
14 |
-
for i in range(num_epochs):
|
15 |
-
np.random.shuffle(img_inds)
|
16 |
-
epoch_inds_shuffle[i, :] = img_inds
|
17 |
-
|
18 |
-
if save_log:
|
19 |
-
with open(os.path.join(log_path, "train_val_shuffle_inds.csv"), "wb") as f:
|
20 |
-
if valid_inds is not None:
|
21 |
-
f.write(b'valid inds\n')
|
22 |
-
np.savetxt(f, valid_inds.reshape(1, -1), fmt='%i', delimiter=",")
|
23 |
-
f.write(b'train inds\n')
|
24 |
-
np.savetxt(f, train_inds.reshape(1, -1), fmt='%i', delimiter=",")
|
25 |
-
f.write(b'shuffle inds\n')
|
26 |
-
np.savetxt(f, epoch_inds_shuffle, fmt='%i', delimiter=",")
|
27 |
-
|
28 |
-
return epoch_inds_shuffle
|
29 |
-
|
30 |
-
|
31 |
-
def gaussian(x, y, x0, y0, sigma=6):
|
32 |
-
return 1./(np.sqrt(2*np.pi)*sigma) * np.exp(-0.5 * ((x-x0)**2 + (y-y0)**2) / sigma**2)
|
33 |
-
|
34 |
-
|
35 |
-
def create_gaussian_filter(sigma=6, win_mult=3.5):
|
36 |
-
win_size = int(win_mult * sigma)
|
37 |
-
x, y = np.mgrid[0:2*win_size+1, 0:2*win_size+1]
|
38 |
-
gauss_filt = (8./3)*sigma*gaussian(x, y, win_size, win_size, sigma=sigma) # same as in ECT
|
39 |
-
return gauss_filt
|
40 |
-
|
41 |
-
|
42 |
-
def load_images(img_list, batch_inds, image_size=256, c_dim=3, scale=255):
|
43 |
-
|
44 |
-
""" load images as a numpy array from menpo image list """
|
45 |
-
|
46 |
-
num_inputs = len(batch_inds)
|
47 |
-
batch_menpo_images = img_list[batch_inds]
|
48 |
-
|
49 |
-
images = np.zeros([num_inputs, image_size, image_size, c_dim]).astype('float32')
|
50 |
-
|
51 |
-
for ind, img in enumerate(batch_menpo_images):
|
52 |
-
if img.n_channels < 3 and c_dim == 3:
|
53 |
-
images[ind, :, :, :] = gray2rgb(img.pixels_with_channels_at_back())
|
54 |
-
else:
|
55 |
-
images[ind, :, :, :] = img.pixels_with_channels_at_back()
|
56 |
-
|
57 |
-
if scale is 255:
|
58 |
-
images *= 255
|
59 |
-
elif scale is 0:
|
60 |
-
images = 2 * images - 1
|
61 |
-
|
62 |
-
return images
|
63 |
-
|
64 |
-
|
65 |
-
# loading functions with pre-allocation and approx heat-map generation
|
66 |
-
|
67 |
-
|
68 |
-
def create_approx_heat_maps_alloc_once(landmarks, maps, gauss_filt=None, win_mult=3.5, num_landmarks=68, image_size=256,
|
69 |
-
sigma=6):
|
70 |
-
""" create heatmaps from input landmarks"""
|
71 |
-
maps.fill(0.)
|
72 |
-
|
73 |
-
win_size = int(win_mult * sigma)
|
74 |
-
filt_size = 2 * win_size + 1
|
75 |
-
landmarks = landmarks.astype(int)
|
76 |
-
|
77 |
-
if gauss_filt is None:
|
78 |
-
x_small, y_small = np.mgrid[0:2 * win_size + 1, 0:2 * win_size + 1]
|
79 |
-
gauss_filt = (8. / 3) * sigma * gaussian(x_small, y_small, win_size, win_size, sigma=sigma) # same as in ECT
|
80 |
-
|
81 |
-
for i in range(num_landmarks):
|
82 |
-
|
83 |
-
min_row = landmarks[i, 0] - win_size
|
84 |
-
max_row = landmarks[i, 0] + win_size + 1
|
85 |
-
min_col = landmarks[i, 1] - win_size
|
86 |
-
max_col = landmarks[i, 1] + win_size + 1
|
87 |
-
|
88 |
-
if min_row < 0:
|
89 |
-
min_row_gap = -1 * min_row
|
90 |
-
min_row = 0
|
91 |
-
else:
|
92 |
-
min_row_gap = 0
|
93 |
-
|
94 |
-
if min_col < 0:
|
95 |
-
min_col_gap = -1 * min_col
|
96 |
-
min_col = 0
|
97 |
-
else:
|
98 |
-
min_col_gap = 0
|
99 |
-
|
100 |
-
if max_row > image_size:
|
101 |
-
max_row_gap = max_row - image_size
|
102 |
-
max_row = image_size
|
103 |
-
else:
|
104 |
-
max_row_gap = 0
|
105 |
-
|
106 |
-
if max_col > image_size:
|
107 |
-
max_col_gap = max_col - image_size
|
108 |
-
max_col = image_size
|
109 |
-
else:
|
110 |
-
max_col_gap = 0
|
111 |
-
|
112 |
-
maps[min_row:max_row, min_col:max_col, i] =\
|
113 |
-
gauss_filt[min_row_gap:filt_size - 1 * max_row_gap, min_col_gap:filt_size - 1 * max_col_gap]
|
114 |
-
|
115 |
-
|
116 |
-
def load_images_landmarks_approx_maps_alloc_once(
|
117 |
-
img_list, batch_inds, images, maps_small, maps, landmarks, image_size=256, num_landmarks=68,
|
118 |
-
scale=255, gauss_filt_large=None, gauss_filt_small=None, win_mult=3.5, sigma=6, save_landmarks=False):
|
119 |
-
|
120 |
-
""" load images and gt landmarks from menpo image list, and create matching heatmaps """
|
121 |
-
|
122 |
-
batch_menpo_images = img_list[batch_inds]
|
123 |
-
c_dim = images.shape[-1]
|
124 |
-
grp_name = batch_menpo_images[0].landmarks.group_labels[0]
|
125 |
-
|
126 |
-
win_size_large = int(win_mult * sigma)
|
127 |
-
win_size_small = int(win_mult * (1.*sigma/4))
|
128 |
-
|
129 |
-
if gauss_filt_small is None:
|
130 |
-
x_small, y_small = np.mgrid[0:2 * win_size_small + 1, 0:2 * win_size_small + 1]
|
131 |
-
gauss_filt_small = (8. / 3) * (1.*sigma/4) * gaussian(
|
132 |
-
x_small, y_small, win_size_small, win_size_small, sigma=1.*sigma/4) # same as in ECT
|
133 |
-
if gauss_filt_large is None:
|
134 |
-
x_large, y_large = np.mgrid[0:2 * win_size_large + 1, 0:2 * win_size_large + 1]
|
135 |
-
gauss_filt_large = (8. / 3) * sigma * gaussian(x_large, y_large, win_size_large, win_size_large, sigma=sigma) # same as in ECT
|
136 |
-
|
137 |
-
for ind, img in enumerate(batch_menpo_images):
|
138 |
-
if img.n_channels < 3 and c_dim == 3:
|
139 |
-
images[ind, :, :, :] = gray2rgb(img.pixels_with_channels_at_back())
|
140 |
-
else:
|
141 |
-
images[ind, :, :, :] = img.pixels_with_channels_at_back()
|
142 |
-
|
143 |
-
lms = img.landmarks[grp_name].points
|
144 |
-
lms = np.minimum(lms, image_size - 1)
|
145 |
-
create_approx_heat_maps_alloc_once(
|
146 |
-
landmarks=lms, maps=maps[ind, :, :, :], gauss_filt=gauss_filt_large, win_mult=win_mult,
|
147 |
-
num_landmarks=num_landmarks, image_size=image_size, sigma=sigma)
|
148 |
-
|
149 |
-
lms_small = img.resize([image_size / 4, image_size / 4]).landmarks[grp_name].points
|
150 |
-
lms_small = np.minimum(lms_small, image_size / 4 - 1)
|
151 |
-
create_approx_heat_maps_alloc_once(
|
152 |
-
landmarks=lms_small, maps=maps_small[ind, :, :, :], gauss_filt=gauss_filt_small, win_mult=win_mult,
|
153 |
-
num_landmarks=num_landmarks, image_size=image_size / 4, sigma=1. * sigma / 4)
|
154 |
-
|
155 |
-
if save_landmarks:
|
156 |
-
landmarks[ind, :, :] = lms
|
157 |
-
|
158 |
-
if scale is 255:
|
159 |
-
images *= 255
|
160 |
-
elif scale is 0:
|
161 |
-
images = 2 * images - 1
|
|
|
1 |
+
import numpy as np
|
2 |
+
import os
|
3 |
+
from skimage.color import gray2rgb
|
4 |
+
|
5 |
+
|
6 |
+
def train_val_shuffle_inds_per_epoch(valid_inds, train_inds, train_iter, batch_size, log_path, save_log=True):
|
7 |
+
"""shuffle image indices for each training epoch and save to log"""
|
8 |
+
|
9 |
+
np.random.seed(0)
|
10 |
+
num_train_images = len(train_inds)
|
11 |
+
num_epochs = int(np.ceil((1. * train_iter) / (1. * num_train_images / batch_size)))+1
|
12 |
+
epoch_inds_shuffle = np.zeros((num_epochs, num_train_images)).astype(int)
|
13 |
+
img_inds = np.arange(num_train_images)
|
14 |
+
for i in range(num_epochs):
|
15 |
+
np.random.shuffle(img_inds)
|
16 |
+
epoch_inds_shuffle[i, :] = img_inds
|
17 |
+
|
18 |
+
if save_log:
|
19 |
+
with open(os.path.join(log_path, "train_val_shuffle_inds.csv"), "wb") as f:
|
20 |
+
if valid_inds is not None:
|
21 |
+
f.write(b'valid inds\n')
|
22 |
+
np.savetxt(f, valid_inds.reshape(1, -1), fmt='%i', delimiter=",")
|
23 |
+
f.write(b'train inds\n')
|
24 |
+
np.savetxt(f, train_inds.reshape(1, -1), fmt='%i', delimiter=",")
|
25 |
+
f.write(b'shuffle inds\n')
|
26 |
+
np.savetxt(f, epoch_inds_shuffle, fmt='%i', delimiter=",")
|
27 |
+
|
28 |
+
return epoch_inds_shuffle
|
29 |
+
|
30 |
+
|
31 |
+
def gaussian(x, y, x0, y0, sigma=6):
|
32 |
+
return 1./(np.sqrt(2*np.pi)*sigma) * np.exp(-0.5 * ((x-x0)**2 + (y-y0)**2) / sigma**2)
|
33 |
+
|
34 |
+
|
35 |
+
def create_gaussian_filter(sigma=6, win_mult=3.5):
|
36 |
+
win_size = int(win_mult * sigma)
|
37 |
+
x, y = np.mgrid[0:2*win_size+1, 0:2*win_size+1]
|
38 |
+
gauss_filt = (8./3)*sigma*gaussian(x, y, win_size, win_size, sigma=sigma) # same as in ECT
|
39 |
+
return gauss_filt
|
40 |
+
|
41 |
+
|
42 |
+
def load_images(img_list, batch_inds, image_size=256, c_dim=3, scale=255):
|
43 |
+
|
44 |
+
""" load images as a numpy array from menpo image list """
|
45 |
+
|
46 |
+
num_inputs = len(batch_inds)
|
47 |
+
batch_menpo_images = img_list[batch_inds]
|
48 |
+
|
49 |
+
images = np.zeros([num_inputs, image_size, image_size, c_dim]).astype('float32')
|
50 |
+
|
51 |
+
for ind, img in enumerate(batch_menpo_images):
|
52 |
+
if img.n_channels < 3 and c_dim == 3:
|
53 |
+
images[ind, :, :, :] = gray2rgb(img.pixels_with_channels_at_back())
|
54 |
+
else:
|
55 |
+
images[ind, :, :, :] = img.pixels_with_channels_at_back()
|
56 |
+
|
57 |
+
if scale is 255:
|
58 |
+
images *= 255
|
59 |
+
elif scale is 0:
|
60 |
+
images = 2 * images - 1
|
61 |
+
|
62 |
+
return images
|
63 |
+
|
64 |
+
|
# loading functions with pre-allocation and approx heat-map generation


def create_approx_heat_maps_alloc_once(landmarks, maps, gauss_filt=None, win_mult=3.5, num_landmarks=68, image_size=256,
                                       sigma=6):
    """ create heatmaps from input landmarks"""
    maps.fill(0.)

    win_size = int(win_mult * sigma)
    filt_size = 2 * win_size + 1
    landmarks = landmarks.astype(int)

    if gauss_filt is None:
        x_small, y_small = np.mgrid[0:2 * win_size + 1, 0:2 * win_size + 1]
        gauss_filt = (8. / 3) * sigma * gaussian(x_small, y_small, win_size, win_size, sigma=sigma)  # same as in ECT

    for i in range(num_landmarks):

        # window around the landmark, before clipping to the image bounds
        min_row = landmarks[i, 0] - win_size
        max_row = landmarks[i, 0] + win_size + 1
        min_col = landmarks[i, 1] - win_size
        max_col = landmarks[i, 1] + win_size + 1

        # clip the window and record how much of the filter falls outside the image
        if min_row < 0:
            min_row_gap = -1 * min_row
            min_row = 0
        else:
            min_row_gap = 0

        if min_col < 0:
            min_col_gap = -1 * min_col
            min_col = 0
        else:
            min_col_gap = 0

        if max_row > image_size:
            max_row_gap = max_row - image_size
            max_row = image_size
        else:
            max_row_gap = 0

        if max_col > image_size:
            max_col_gap = max_col - image_size
            max_col = image_size
        else:
            max_col_gap = 0

        # paste the (cropped) gaussian filter into channel i
        maps[min_row:max_row, min_col:max_col, i] = \
            gauss_filt[min_row_gap:filt_size - max_row_gap, min_col_gap:filt_size - max_col_gap]

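Driving the heatmap routine directly on one synthetic example (the landmark values are made up; buffer shapes follow the defaults above):

maps = np.zeros((256, 256, 68), dtype='float32')  # allocated once, reused per image
lms = np.random.randint(0, 256, size=(68, 2)).astype('float32')
filt = create_gaussian_filter(sigma=6, win_mult=3.5)
create_approx_heat_maps_alloc_once(lms, maps, gauss_filt=filt)
assert maps[:, :, 0].max() > 0  # each channel holds a clipped gaussian bump at its landmark
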
def load_images_landmarks_approx_maps_alloc_once(
        img_list, batch_inds, images, maps_small, maps, landmarks, image_size=256, num_landmarks=68,
        scale=255, gauss_filt_large=None, gauss_filt_small=None, win_mult=3.5, sigma=6, save_landmarks=False):

    """ load images and gt landmarks from menpo image list, and create matching heatmaps """

    batch_menpo_images = img_list[batch_inds]
    c_dim = images.shape[-1]
    grp_name = batch_menpo_images[0].landmarks.group_labels[0]

    win_size_large = int(win_mult * sigma)
    win_size_small = int(win_mult * (1. * sigma / 4))

    if gauss_filt_small is None:
        x_small, y_small = np.mgrid[0:2 * win_size_small + 1, 0:2 * win_size_small + 1]
        gauss_filt_small = (8. / 3) * (1. * sigma / 4) * gaussian(
            x_small, y_small, win_size_small, win_size_small, sigma=1. * sigma / 4)  # same as in ECT
    if gauss_filt_large is None:
        x_large, y_large = np.mgrid[0:2 * win_size_large + 1, 0:2 * win_size_large + 1]
        gauss_filt_large = (8. / 3) * sigma * gaussian(
            x_large, y_large, win_size_large, win_size_large, sigma=sigma)  # same as in ECT

    for ind, img in enumerate(batch_menpo_images):
        if img.n_channels < 3 and c_dim == 3:
            images[ind, :, :, :] = gray2rgb(img.pixels_with_channels_at_back())
        else:
            images[ind, :, :, :] = img.pixels_with_channels_at_back()

        lms = img.landmarks[grp_name].points
        lms = np.minimum(lms, image_size - 1)
        create_approx_heat_maps_alloc_once(
            landmarks=lms, maps=maps[ind, :, :, :], gauss_filt=gauss_filt_large, win_mult=win_mult,
            num_landmarks=num_landmarks, image_size=image_size, sigma=sigma)

        # quarter-resolution maps for intermediate supervision ('//' keeps the sizes integral under Python 3)
        lms_small = img.resize([image_size // 4, image_size // 4]).landmarks[grp_name].points
        lms_small = np.minimum(lms_small, image_size // 4 - 1)
        create_approx_heat_maps_alloc_once(
            landmarks=lms_small, maps=maps_small[ind, :, :, :], gauss_filt=gauss_filt_small, win_mult=win_mult,
            num_landmarks=num_landmarks, image_size=image_size // 4, sigma=1. * sigma / 4)

        if save_landmarks:
            landmarks[ind, :, :] = lms

    # rescale in place so the caller's pre-allocated array sees the change
    # (the original 'images = 2 * images - 1' rebound the local name and never reached the caller)
    if scale == 255:
        images *= 255
    elif scale == 0:
        images *= 2
        images -= 1

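Putting the pre-allocated pieces together; a sketch under the assumption that `img_list` is a menpo image list carrying ground-truth landmark groups:

batch_size, image_size, num_landmarks = 4, 256, 68
images = np.zeros((batch_size, image_size, image_size, 3), dtype='float32')
maps = np.zeros((batch_size, image_size, image_size, num_landmarks), dtype='float32')
maps_small = np.zeros((batch_size, image_size // 4, image_size // 4, num_landmarks), dtype='float32')
lms_buf = np.zeros((batch_size, num_landmarks, 2), dtype='float32')
load_images_landmarks_approx_maps_alloc_once(
    img_list, np.arange(batch_size), images, maps_small, maps, lms_buf, scale=0, save_landmarks=True)
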
MakeItTalk/thirdparty/face_of_art/deep_heatmaps_model_fusion_net.py
CHANGED
The diff for this file is too large to render.
See raw diff
MakeItTalk/thirdparty/face_of_art/deformation_functions.py
CHANGED
@@ -1,386 +1,386 @@
import numpy as np


def deform_part(landmarks, part_inds, scale_y=1., scale_x=1., shift_ver=0., shift_horiz=0.):
    """ deform facial part landmarks - matching ibug annotations of 68 landmarks """

    landmarks_part = landmarks[part_inds, :].copy()
    part_mean = np.mean(landmarks_part, 0)

    landmarks_norm = landmarks_part - part_mean
    landmarks_deform = landmarks_norm.copy()
    landmarks_deform[:, 1] = scale_x * landmarks_deform[:, 1]
    landmarks_deform[:, 0] = scale_y * landmarks_deform[:, 0]

    landmarks_deform = landmarks_deform + part_mean
    landmarks_deform = landmarks_deform + shift_ver * np.array([1, 0]) + shift_horiz * np.array([0, 1])

    deform_shape = landmarks.copy()
    deform_shape[part_inds] = landmarks_deform
    return deform_shape


def deform_mouth(lms, p_scale=0, p_shift=0, pad=5):
    """ deform mouth landmarks - matching ibug annotations of 68 landmarks """

    jaw_line_inds = np.arange(0, 17)
    nose_inds = np.arange(27, 36)
    mouth_inds = np.arange(48, 68)

    part_inds = mouth_inds.copy()

    # find part spatial limitations
    jaw_pad = 4
    x_max = np.max(lms[part_inds, 1]) + (np.max(lms[jaw_line_inds[jaw_pad:-jaw_pad], 1]) - np.max(
        lms[part_inds, 1])) * 0.5 - pad
    x_min = np.min(lms[jaw_line_inds[jaw_pad:-jaw_pad], 1]) + (np.min(lms[part_inds, 1]) - np.min(
        lms[jaw_line_inds[jaw_pad:-jaw_pad], 1])) * 0.5 + pad
    y_min = np.max(lms[nose_inds, 0]) + (np.min(lms[part_inds, 0]) - np.max(lms[nose_inds, 0])) * 0.5
    max_jaw = np.minimum(np.max(lms[jaw_line_inds, 0]), lms[8, 0])
    y_max = max_jaw - (max_jaw - np.max(lms[part_inds, 0])) * 0.5 - pad

    # scale facial feature
    scale = np.random.rand()
    if p_scale > 0.5 and scale > 0.5:

        part_mean = np.mean(lms[part_inds, :], 0)
        lms_part_norm = lms[part_inds, :] - part_mean

        part_y_bound_min, part_x_bound_min = np.min(lms_part_norm, 0)
        part_y_bound_max, part_x_bound_max = np.max(lms_part_norm, 0)

        scale_max_y = np.minimum(
            (y_min - part_mean[0]) / part_y_bound_min,
            (y_max - part_mean[0]) / part_y_bound_max)
        scale_max_y = np.minimum(scale_max_y, 1.2)

        scale_max_x = np.minimum(
            (x_min - part_mean[1]) / part_x_bound_min,
            (x_max - part_mean[1]) / part_x_bound_max)
        scale_max_x = np.minimum(scale_max_x, 1.2)

        scale_y = np.random.uniform(0.7, scale_max_y)
        scale_x = np.random.uniform(0.7, scale_max_x)

        lms_def_scale = deform_part(lms, part_inds, scale_y=scale_y, scale_x=scale_x, shift_ver=0., shift_horiz=0.)

        # check for spatial errors
        error = check_deformation_spatial_errors(lms_def_scale, part_inds, pad=pad)
        if error:
            lms_def_scale = lms.copy()
    else:
        lms_def_scale = lms.copy()

    # shift facial feature
    if p_shift > 0.5 and (np.random.rand() > 0.5 or not scale):

        part_mean = np.mean(lms_def_scale[part_inds, :], 0)
        lms_part_norm = lms_def_scale[part_inds, :] - part_mean

        part_y_bound_min, part_x_bound_min = np.min(lms_part_norm, 0)
        part_y_bound_max, part_x_bound_max = np.max(lms_part_norm, 0)

        shift_x = np.random.uniform(x_min - (part_mean[1] + part_x_bound_min),
                                    x_max - (part_mean[1] + part_x_bound_max))
        shift_y = np.random.uniform(y_min - (part_mean[0] + part_y_bound_min),
                                    y_max - (part_mean[0] + part_y_bound_max))

        lms_def = deform_part(lms_def_scale, part_inds, scale_y=1., scale_x=1., shift_ver=shift_y, shift_horiz=shift_x)
        error = check_deformation_spatial_errors(lms_def, part_inds, pad=pad)
        if error:
            lms_def = lms_def_scale.copy()
    else:
        lms_def = lms_def_scale.copy()

    return lms_def


def deform_nose(lms, p_scale=0, p_shift=0, pad=5):
    """ deform nose landmarks - matching ibug annotations of 68 landmarks """

    nose_inds = np.arange(27, 36)
    left_eye_inds = np.arange(36, 42)
    right_eye_inds = np.arange(42, 48)
    mouth_inds = np.arange(48, 68)

    part_inds = nose_inds.copy()

    # find part spatial limitations
    x_max = np.max(lms[part_inds[:4], 1]) + (np.min(lms[right_eye_inds, 1]) - np.max(lms[part_inds[:4], 1])) * 0.5 - pad
    x_min = np.max(lms[left_eye_inds, 1]) + (np.min(lms[part_inds[:4], 1]) - np.max(lms[left_eye_inds, 1])) * 0.5 + pad

    max_brows = np.max(lms[21:23, 0])
    y_min = np.min(lms[part_inds, 0]) + (max_brows - np.min(lms[part_inds, 0])) * 0.5
    min_mouth = np.min(lms[mouth_inds, 0])
    y_max = np.max(lms[part_inds, 0]) + (np.max(lms[part_inds, 0]) - min_mouth) * 0 - pad

    # scale facial feature
    scale = np.random.rand()
    if p_scale > 0.5 and scale > 0.5:

        part_mean = np.mean(lms[part_inds, :], 0)
        lms_part_norm = lms[part_inds, :] - part_mean

        part_y_bound_min = np.min(lms_part_norm[:, 0])
        part_y_bound_max = np.max(lms_part_norm[:, 0])

        scale_max_y = np.minimum(
            (y_min - part_mean[0]) / part_y_bound_min,
            (y_max - part_mean[0]) / part_y_bound_max)
        scale_y = np.random.uniform(0.7, scale_max_y)
        scale_x = np.random.uniform(0.7, 1.5)

        lms_def_scale = deform_part(lms, part_inds, scale_y=scale_y, scale_x=scale_x, shift_ver=0., shift_horiz=0.)

        error1 = check_deformation_spatial_errors(lms_def_scale, part_inds[:4], pad=pad)
        error2 = check_deformation_spatial_errors(lms_def_scale, part_inds[4:], pad=pad)
        error = error1 + error2
        if error:
            lms_def_scale = lms.copy()
    else:
        lms_def_scale = lms.copy()

    # shift facial feature
    if p_shift > 0.5 and (np.random.rand() > 0.5 or not scale):

        part_mean = np.mean(lms_def_scale[part_inds, :], 0)
        lms_part_norm = lms_def_scale[part_inds, :] - part_mean

        part_x_bound_min = np.min(lms_part_norm[:4], 0)
        part_x_bound_max = np.max(lms_part_norm[:4], 0)
        part_y_bound_min = np.min(lms_part_norm[:, 0])
        part_y_bound_max = np.max(lms_part_norm[:, 0])

        shift_x = np.random.uniform(x_min - (part_mean[1] + part_x_bound_min),
                                    x_max - (part_mean[1] + part_x_bound_max))
        shift_y = np.random.uniform(y_min - (part_mean[0] + part_y_bound_min),
                                    y_max - (part_mean[0] + part_y_bound_max))

        lms_def = deform_part(lms_def_scale, part_inds, scale_y=1., scale_x=1., shift_ver=shift_y, shift_horiz=shift_x)

        error1 = check_deformation_spatial_errors(lms_def, part_inds[:4], pad=pad)
        error2 = check_deformation_spatial_errors(lms_def, part_inds[4:], pad=pad)
        error = error1 + error2
        if error:
            lms_def = lms_def_scale.copy()
    else:
        lms_def = lms_def_scale.copy()

    return lms_def


def deform_eyes(lms, p_scale=0, p_shift=0, pad=10):
    """ deform eyes + eyebrows landmarks - matching ibug annotations of 68 landmarks """

    nose_inds = np.arange(27, 36)
    left_eye_inds = np.arange(36, 42)
    right_eye_inds = np.arange(42, 48)
    left_brow_inds = np.arange(17, 22)
    right_brow_inds = np.arange(22, 27)

    part_inds_right = np.hstack((right_brow_inds, right_eye_inds))
    part_inds_left = np.hstack((left_brow_inds, left_eye_inds))

    # find part spatial limitations

    # right eye+eyebrow
    x_max_right = np.max(lms[part_inds_right, 1]) + (lms[16, 1] - np.max(lms[part_inds_right, 1])) * 0.5 - pad
    x_min_right = np.max(lms[nose_inds[:4], 1]) + (np.min(lms[part_inds_right, 1]) - np.max(
        lms[nose_inds[:4], 1])) * 0.5 + pad
    y_max_right = np.max(lms[part_inds_right, 0]) + (lms[33, 0] - np.max(lms[part_inds_right, 0])) * 0.25 - pad
    y_min_right = 2 * pad

    # left eye+eyebrow
    x_max_left = np.max(lms[part_inds_left, 1]) + (np.min(lms[nose_inds[:4], 1]) - np.max(
        lms[part_inds_left, 1])) * 0.5 - pad
    x_min_left = lms[0, 1] + (np.min(lms[part_inds_left, 1]) - lms[0, 1]) * 0.5 + pad

    y_max_left = np.max(lms[part_inds_left, 0]) + (lms[33, 0] - np.max(lms[part_inds_left, 0])) * 0.25 - pad
    y_min_left = 2 * pad

    # scale facial feature
    scale = np.random.rand()
    if p_scale > 0.5 and scale > 0.5:

        # right eye+eyebrow
        part_mean = np.mean(lms[part_inds_right, :], 0)
        lms_part_norm = lms[part_inds_right, :] - part_mean

        part_y_bound_min, part_x_bound_min = np.min(lms_part_norm, 0)
        part_y_bound_max, part_x_bound_max = np.max(lms_part_norm, 0)

        scale_max_y = np.minimum(
            (y_min_right - part_mean[0]) / part_y_bound_min,
            (y_max_right - part_mean[0]) / part_y_bound_max)
        scale_max_y_right = np.minimum(scale_max_y, 1.5)

        scale_max_x = np.minimum(
            (x_min_right - part_mean[1]) / part_x_bound_min,
            (x_max_right - part_mean[1]) / part_x_bound_max)
        scale_max_x_right = np.minimum(scale_max_x, 1.5)

        # left eye+eyebrow
        part_mean = np.mean(lms[part_inds_left, :], 0)
        lms_part_norm = lms[part_inds_left, :] - part_mean

        part_y_bound_min, part_x_bound_min = np.min(lms_part_norm, 0)
        part_y_bound_max, part_x_bound_max = np.max(lms_part_norm, 0)

        scale_max_y = np.minimum(
            (y_min_left - part_mean[0]) / part_y_bound_min,
            (y_max_left - part_mean[0]) / part_y_bound_max)
        scale_max_y_left = np.minimum(scale_max_y, 1.5)

        scale_max_x = np.minimum(
            (x_min_left - part_mean[1]) / part_x_bound_min,
            (x_max_left - part_mean[1]) / part_x_bound_max)
        scale_max_x_left = np.minimum(scale_max_x, 1.5)

        scale_max_x = np.minimum(scale_max_x_left, scale_max_x_right)
        scale_max_y = np.minimum(scale_max_y_left, scale_max_y_right)
        scale_y = np.random.uniform(0.8, scale_max_y)
        scale_x = np.random.uniform(0.8, scale_max_x)

        lms_def_scale = deform_part(lms, part_inds_right, scale_y=scale_y, scale_x=scale_x, shift_ver=0.,
                                    shift_horiz=0.)
        lms_def_scale = deform_part(lms_def_scale.copy(), part_inds_left, scale_y=scale_y, scale_x=scale_x,
                                    shift_ver=0., shift_horiz=0.)

        error1 = check_deformation_spatial_errors(lms_def_scale, part_inds_right, pad=pad)
        error2 = check_deformation_spatial_errors(lms_def_scale, part_inds_left, pad=pad)
        error = error1 + error2
        if error:
            lms_def_scale = lms.copy()
    else:
        lms_def_scale = lms.copy()

    # shift facial feature
    if p_shift > 0.5 and (np.random.rand() > 0.5 or not scale):

        y_min_right = np.maximum(0.8 * np.min(lms_def_scale[part_inds_right, 0]), pad)
        y_min_left = np.maximum(0.8 * np.min(lms_def_scale[part_inds_left, 0]), pad)

        # right eye
        part_mean = np.mean(lms_def_scale[part_inds_right, :], 0)
        lms_part_norm = lms_def_scale[part_inds_right, :] - part_mean

        part_y_bound_min, part_x_bound_min = np.min(lms_part_norm, 0)
        part_y_bound_max, part_x_bound_max = np.max(lms_part_norm, 0)

        shift_x = np.random.uniform(x_min_right - (part_mean[1] + part_x_bound_min),
                                    x_max_right - (part_mean[1] + part_x_bound_max))
        shift_y = np.random.uniform(y_min_right - (part_mean[0] + part_y_bound_min),
                                    y_max_right - (part_mean[0] + part_y_bound_max))

        lms_def_right = deform_part(lms_def_scale, part_inds_right, scale_y=1., scale_x=1., shift_ver=shift_y,
                                    shift_horiz=shift_x)

        error1 = check_deformation_spatial_errors(lms_def_right, part_inds_right, pad=pad)
        if error1:
            lms_def_right = lms_def_scale.copy()

        # left eye
        part_mean = np.mean(lms_def_scale[part_inds_left, :], 0)
        lms_part_norm = lms_def_scale[part_inds_left, :] - part_mean

        part_y_bound_min, part_x_bound_min = np.min(lms_part_norm, 0)
        part_y_bound_max, part_x_bound_max = np.max(lms_part_norm, 0)

        shift_x = np.random.uniform(x_min_left - (part_mean[1] + part_x_bound_min),
                                    x_max_left - (part_mean[1] + part_x_bound_max))
        shift_y = np.random.uniform(y_min_left - (part_mean[0] + part_y_bound_min),
                                    y_max_left - (part_mean[0] + part_y_bound_max))

        lms_def = deform_part(lms_def_right.copy(), part_inds_left, scale_y=1., scale_x=1., shift_ver=shift_y,
                              shift_horiz=shift_x)

        error2 = check_deformation_spatial_errors(lms_def, part_inds_left, pad=pad)
        if error2:
            lms_def = lms_def_right.copy()
    else:
        lms_def = lms_def_scale.copy()

    return lms_def


def deform_scale_face(lms, p_scale=0, pad=5, image_size=256):
    """ change face landmarks scale & aspect ratio - matching ibug annotations of 68 landmarks """

    part_inds = np.arange(68)

    # find spatial limitations
    x_max = np.max(lms[part_inds, 1]) + (image_size - np.max(lms[part_inds, 1])) * 0.5 - pad
    x_min = np.min(lms[part_inds, 1]) * 0.5 + pad

    y_min = 2 * pad
    y_max = np.max(lms[part_inds, 0]) + (image_size - np.max(lms[part_inds, 0])) * 0.5 - pad

    if p_scale > 0.5:

        part_mean = np.mean(lms[part_inds, :], 0)
        lms_part_norm = lms[part_inds, :] - part_mean

        part_y_bound_min, part_x_bound_min = np.min(lms_part_norm, 0)
        part_y_bound_max, part_x_bound_max = np.max(lms_part_norm, 0)

        scale_max_y = np.minimum(
            (y_min - part_mean[0]) / part_y_bound_min,
            (y_max - part_mean[0]) / part_y_bound_max)
        scale_max_y = np.minimum(scale_max_y, 1.2)

        scale_max_x = np.minimum(
            (x_min - part_mean[1]) / part_x_bound_min,
            (x_max - part_mean[1]) / part_x_bound_max)
        scale_max_x = np.minimum(scale_max_x, 1.2)

        scale_y = np.random.uniform(0.6, scale_max_y)
        scale_x = np.random.uniform(0.6, scale_max_x)

        lms_def_scale = deform_part(lms, part_inds, scale_y=scale_y, scale_x=scale_x, shift_ver=0., shift_horiz=0.)

        # check for spatial errors
        error2 = np.sum(lms_def_scale >= image_size) + np.sum(lms_def_scale < 0)
        error1 = len(np.unique(lms_def_scale.astype('int'), axis=0)) != len(lms_def_scale)
        error = error1 + error2
        if error:
            lms_def_scale = lms.copy()
    else:
        lms_def_scale = lms.copy()

    return lms_def_scale


def deform_face_geometric_style(lms, p_scale=0, p_shift=0):
    """ deform facial landmarks - matching ibug annotations of 68 landmarks """

    lms = deform_scale_face(lms.copy(), p_scale=p_scale, pad=0)
    lms = deform_nose(lms.copy(), p_scale=p_scale, p_shift=p_shift, pad=0)
    lms = deform_mouth(lms.copy(), p_scale=p_scale, p_shift=p_shift, pad=0)
    lms = deform_eyes(lms.copy(), p_scale=p_scale, p_shift=p_shift, pad=0)
    return lms


def get_bounds(lms):
    part_y_bound_min, part_x_bound_min = np.min(lms, 0)
    part_y_bound_max, part_x_bound_max = np.max(lms, 0)
    return np.array([[part_x_bound_min, part_x_bound_max], [part_y_bound_min, part_y_bound_max]])


def part_intersection(part_to_check, points_to_compare, pad=0):
    points_to_compare = np.round(points_to_compare.copy())
    check_bounds = np.round(get_bounds(part_to_check))
    check_bounds[:, 0] += pad
    check_bounds[:, 1] -= pad
    inds_y = np.where(np.logical_and(points_to_compare[:, 0] > check_bounds[1, 0],
                                     points_to_compare[:, 0] < check_bounds[1, 1]))
    inds_x = np.where(np.logical_and(points_to_compare[:, 1] > check_bounds[0, 0],
                                     points_to_compare[:, 1] < check_bounds[0, 1]))
    return np.intersect1d(inds_y, inds_x)


def check_deformation_spatial_errors(def_landmarks, part_inds, pad=0):
    """ check for spatial errors (overlap with other facial parts) in deformed landmarks """

    part_to_check = def_landmarks[part_inds, :].copy()
    points_to_compare = np.delete(def_landmarks, part_inds, axis=0).reshape(-1, 2)
    inter_inds = part_intersection(part_to_check, points_to_compare, pad=pad)
    out = len(inter_inds) > 0
    return out

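A brief usage sketch: applying the full geometric-style augmentation to a synthetic 68x2 (row, col) landmark array; real callers pass ground-truth iBUG landmarks of a 256px face crop, and degenerate synthetic geometry may trigger numpy divide warnings:

lms = np.random.uniform(20., 230., size=(68, 2))  # synthetic stand-in landmarks
lms_aug = deform_face_geometric_style(lms, p_scale=1., p_shift=1.)
assert lms_aug.shape == (68, 2)
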
MakeItTalk/thirdparty/face_of_art/logging_functions.py
CHANGED
@@ -1,200 +1,200 @@
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from scipy.ndimage import zoom


def print_training_params_to_file(init_locals):
    """save param log file"""

    del init_locals['self']
    with open(os.path.join(init_locals['save_log_path'], 'Training_Parameters.txt'), 'w') as f:
        f.write('Training Parameters:\n\n')
        for key, value in init_locals.items():
            f.write('* %s: %s\n' % (key, value))


def heat_maps_to_landmarks(maps, image_size=256, num_landmarks=68):
    """find landmarks from heatmaps (arg max on each map)"""

    landmarks = np.zeros((num_landmarks, 2)).astype('float32')

    for m_ind in range(num_landmarks):
        landmarks[m_ind, :] = np.unravel_index(maps[:, :, m_ind].argmax(), (image_size, image_size))

    return landmarks


def heat_maps_to_landmarks_alloc_once(maps, landmarks, image_size=256, num_landmarks=68):
    """find landmarks from heatmaps (arg max on each map) with pre-allocation"""

    for m_ind in range(num_landmarks):
        landmarks[m_ind, :] = np.unravel_index(maps[:, :, m_ind].argmax(), (image_size, image_size))


def batch_heat_maps_to_landmarks_alloc_once(batch_maps, batch_landmarks, batch_size, image_size=256, num_landmarks=68):
    """find landmarks from heatmaps (arg max on each map) - for multiple images"""

    for i in range(batch_size):
        heat_maps_to_landmarks_alloc_once(
            maps=batch_maps[i, :, :, :], landmarks=batch_landmarks[i, :, :], image_size=image_size,
            num_landmarks=num_landmarks)


def normalize_map(map_in):
    map_min = map_in.min()
    return (map_in - map_min) / (map_in.max() - map_min)


def map_to_rgb(map_gray):
    cmap = plt.get_cmap('jet')
    rgba_map_image = cmap(map_gray)
    map_rgb = np.delete(rgba_map_image, 3, 2) * 255
    return map_rgb


def create_img_with_landmarks(image, landmarks, image_size=256, num_landmarks=68, scale=255, circle_size=2):
    """add landmarks to a face image"""
    image = image.reshape(image_size, image_size, -1)

    # use '==' rather than 'is': identity checks against int literals are unreliable
    if scale == 0:
        image = 127.5 * (image + 1)
    elif scale == 1:
        image *= 255

    landmarks = landmarks.reshape(num_landmarks, 2)
    landmarks = np.clip(landmarks, 0, image_size - 1)

    for (y, x) in landmarks.astype('int'):
        cv2.circle(image, (x, y), circle_size, (255, 0, 0), -1)

    return image


def heat_maps_to_image(maps, landmarks=None, image_size=256, num_landmarks=68):
    """create one image from multiple heatmaps"""

    if landmarks is None:
        landmarks = heat_maps_to_landmarks(maps, image_size=image_size, num_landmarks=num_landmarks)

    x, y = np.mgrid[0:image_size, 0:image_size]

    pixel_dist = np.sqrt(
        np.square(np.expand_dims(x, 2) - landmarks[:, 0]) + np.square(np.expand_dims(y, 2) - landmarks[:, 1]))

    nn_landmark = np.argmin(pixel_dist, 2)

    map_image = maps[x, y, nn_landmark]
    map_image = (map_image - map_image.min()) / (map_image.max() - map_image.min())  # normalize for visualization

    return map_image


def merge_images_landmarks_maps_gt(images, maps, maps_gt, landmarks=None, image_size=256, num_landmarks=68,
                                   num_samples=9, scale=255, circle_size=2, fast=False):
    """create image for log - containing input face images, predicted heatmaps and GT heatmaps (if they exist)"""

    images = images[:num_samples]
    if maps.shape[1] != image_size:
        images = zoom(images, (1, 0.25, 0.25, 1))
        image_size = int(image_size / 4)
    if maps_gt is not None:
        if maps_gt.shape[1] != image_size:
            maps_gt = zoom(maps_gt, (1, 0.25, 0.25, 1))

    cmap = plt.get_cmap('jet')

    row = int(np.sqrt(num_samples))
    if maps_gt is None:
        merged = np.zeros([row * image_size, row * image_size * 2, 3])
    else:
        merged = np.zeros([row * image_size, row * image_size * 3, 3])

    for idx, img in enumerate(images):
        i = idx // row
        j = idx % row

        if landmarks is None:
            img_landmarks = heat_maps_to_landmarks(maps[idx, :, :, :], image_size=image_size,
                                                   num_landmarks=num_landmarks)
        else:
            img_landmarks = landmarks[idx]

        if fast:
            map_image = np.amax(maps[idx, :, :, :], 2)
            map_image = (map_image - map_image.min()) / (map_image.max() - map_image.min())
        else:
            map_image = heat_maps_to_image(maps[idx, :, :, :], img_landmarks, image_size=image_size,
                                           num_landmarks=num_landmarks)
        rgba_map_image = cmap(map_image)
        map_image = np.delete(rgba_map_image, 3, 2) * 255

        img = create_img_with_landmarks(img, img_landmarks, image_size, num_landmarks, scale=scale,
                                        circle_size=circle_size)

        if maps_gt is not None:
            if fast:
                map_gt_image = np.amax(maps_gt[idx, :, :, :], 2)
                map_gt_image = (map_gt_image - map_gt_image.min()) / (map_gt_image.max() - map_gt_image.min())
            else:
                map_gt_image = heat_maps_to_image(maps_gt[idx, :, :, :], image_size=image_size,
                                                  num_landmarks=num_landmarks)
            rgba_map_gt_image = cmap(map_gt_image)
            map_gt_image = np.delete(rgba_map_gt_image, 3, 2) * 255

            merged[i * image_size:(i + 1) * image_size, (j * 3) * image_size:(j * 3 + 1) * image_size, :] = img
            merged[i * image_size:(i + 1) * image_size, (j * 3 + 1) * image_size:(j * 3 + 2) * image_size,
                   :] = map_image
            merged[i * image_size:(i + 1) * image_size, (j * 3 + 2) * image_size:(j * 3 + 3) * image_size,
                   :] = map_gt_image
        else:
            merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = img
            merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = map_image

    return merged


def map_comapre_channels(images, maps1, maps2, image_size=64, num_landmarks=68, scale=255):
    """create image for log - present one face image, along with all its heatmaps (one for each landmark)"""

    map1 = maps1[0]
    if maps2 is not None:
        map2 = maps2[0]
    image = images[0]

    if image.shape[0] != image_size:
        image = zoom(image, (0.25, 0.25, 1))
    if scale == 1:
        image *= 255
    elif scale == 0:
        image = 127.5 * (image + 1)

    row = np.ceil(np.sqrt(num_landmarks)).astype(np.int64)
    if maps2 is not None:
        merged = np.zeros([row * image_size, row * image_size * 2, 3])
    else:
        merged = np.zeros([row * image_size, row * image_size, 3])

    for idx in range(num_landmarks):
        i = idx // row
        j = idx % row
        channel_map = map_to_rgb(normalize_map(map1[:, :, idx]))
        if maps2 is not None:
            channel_map2 = map_to_rgb(normalize_map(map2[:, :, idx]))
            merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = \
                channel_map
            merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = \
                channel_map2
        else:
            merged[i * image_size:(i + 1) * image_size, j * image_size:(j + 1) * image_size, :] = channel_map

    # place the face image in the next free grid cell after the last landmark channel
    i = (idx + 1) // row
    j = (idx + 1) % row
    if maps2 is not None:
        merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = image
    else:
        merged[i * image_size:(i + 1) * image_size, j * image_size:(j + 1) * image_size, :] = image
    return merged

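A small sketch of the decoding path, with random arrays standing in for network output:

batch_maps = np.random.rand(4, 64, 64, 68).astype('float32')  # stand-in for predicted heatmaps
batch_lms = np.zeros((4, 68, 2), dtype='float32')
batch_heat_maps_to_landmarks_alloc_once(batch_maps, batch_lms, batch_size=4, image_size=64)
# batch_lms[i, k] now holds the (row, col) arg-max of heatmap k for sample i
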
MakeItTalk/thirdparty/face_of_art/menpo_functions.py
CHANGED
@@ -1,299 +1,299 @@
(The file was removed and re-added with identical contents, so the 299-line listing appears once below.)

import os
from scipy.io import loadmat
from menpo.shape.pointcloud import PointCloud
from menpo.transform import ThinPlateSplines
import menpo.transform as mt

import menpo.io as mio
from glob import glob
from thirdparty.face_of_art.deformation_functions import *

# landmark indices by facial feature
jaw_indices = np.arange(0, 17)
lbrow_indices = np.arange(17, 22)
rbrow_indices = np.arange(22, 27)
upper_nose_indices = np.arange(27, 31)
lower_nose_indices = np.arange(31, 36)
leye_indices = np.arange(36, 42)
reye_indices = np.arange(42, 48)
outer_mouth_indices = np.arange(48, 60)
inner_mouth_indices = np.arange(60, 68)

# flipped landmark indices
mirrored_parts_68 = np.hstack([
    jaw_indices[::-1], rbrow_indices[::-1], lbrow_indices[::-1],
    upper_nose_indices, lower_nose_indices[::-1],
    np.roll(reye_indices[::-1], 4), np.roll(leye_indices[::-1], 4),
    np.roll(outer_mouth_indices[::-1], 7),
    np.roll(inner_mouth_indices[::-1], 5)
])


def load_bb_files(bb_file_dirs):
    """load bounding box mat files for the challenging, common, full & training datasets"""

    bb_files_dict = {}
    for bb_file in bb_file_dirs:
        bb_mat = loadmat(bb_file)['bounding_boxes']
        num_imgs = np.max(bb_mat.shape)
        for i in range(num_imgs):
            name = bb_mat[0][i][0][0][0][0]
            bb_init = bb_mat[0][i][0][0][1] - 1  # matlab indices
            bb_gt = bb_mat[0][i][0][0][2] - 1  # matlab indices
            if str(name) in bb_files_dict.keys():
                print(str(name) + ' already exists')
            else:
                bb_files_dict[str(name)] = (bb_init, bb_gt)
    return bb_files_dict


def load_bb_dictionary(bb_dir, mode, test_data='full'):
    """create bounding box dictionary of input dataset: train/common/full/challenging"""

    if mode == 'TRAIN':
        bb_dirs = \
            ['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
    else:
        if test_data == 'common':
            bb_dirs = \
                ['bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
        elif test_data == 'challenging':
            bb_dirs = ['bounding_boxes_ibug.mat']
        elif test_data == 'full':
            bb_dirs = \
                ['bounding_boxes_ibug.mat', 'bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
        elif test_data == 'training':
            bb_dirs = \
                ['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
        else:
            bb_dirs = None

    if mode == 'TEST' and test_data not in ['full', 'challenging', 'common', 'training']:
        bb_files_dict = None
    else:
        bb_dirs = [os.path.join(bb_dir, dataset) for dataset in bb_dirs]
        bb_files_dict = load_bb_files(bb_dirs)

    return bb_files_dict


def center_margin_bb(bb, img_bounds, margin=0.25):
    """create new bounding box with input margin"""

    bb_size = [bb[0, 2] - bb[0, 0], bb[0, 3] - bb[0, 1]]
    margins = (np.max(bb_size) * (1 + margin) - bb_size) / 2
    bb_new = np.zeros_like(bb)
    bb_new[0, 0] = np.maximum(bb[0, 0] - margins[0], 0)
    bb_new[0, 2] = np.minimum(bb[0, 2] + margins[0], img_bounds[1])
    bb_new[0, 1] = np.maximum(bb[0, 1] - margins[1], 0)
    bb_new[0, 3] = np.minimum(bb[0, 3] + margins[1], img_bounds[0])
    return bb_new


def crop_to_face_image(img, bb_dictionary=None, gt=True, margin=0.25, image_size=256, normalize=True,
                       return_transform=False):
    """crop face image using bounding box dictionary, or GT landmarks"""

    name = img.path.name
    img_bounds = img.bounds()[1]

    # if there is no bounding-box dict and GT landmarks are available, use them to determine the bounding box
    if bb_dictionary is None and img.has_landmarks:
        grp_name = img.landmarks.group_labels[0]
        bb_menpo = img.landmarks[grp_name].bounding_box().points
        bb = np.array([[bb_menpo[0, 1], bb_menpo[0, 0], bb_menpo[2, 1], bb_menpo[2, 0]]])
    elif bb_dictionary is not None:
        if gt:
            bb = bb_dictionary[name][1]  # ground truth
        else:
            bb = bb_dictionary[name][0]  # init from face detector
    else:
        bb = None

    if bb is not None:
        # add margin to bounding box
        bb = center_margin_bb(bb, img_bounds, margin=margin)
        bb_pointcloud = PointCloud(np.array([[bb[0, 1], bb[0, 0]],
                                             [bb[0, 3], bb[0, 0]],
                                             [bb[0, 3], bb[0, 2]],
                                             [bb[0, 1], bb[0, 2]]]))
        if return_transform:
            face_crop, bb_transform = img.crop_to_pointcloud(bb_pointcloud, return_transform=True)
        else:
            face_crop = img.crop_to_pointcloud(bb_pointcloud)
    else:
        # if there is no bounding box/gt landmarks, use the entire image
        face_crop = img.copy()
        bb_transform = None

    # if the face crop is not square - pad borders with mean pixel value
    h, w = face_crop.shape
    diff = h - w
    if diff < 0:
        face_crop.pixels = np.pad(face_crop.pixels, ((0, 0), (0, -1 * diff), (0, 0)), 'mean')
    elif diff > 0:
        face_crop.pixels = np.pad(face_crop.pixels, ((0, 0), (0, 0), (0, diff)), 'mean')

    if return_transform:
        face_crop, rescale_transform = face_crop.resize([image_size, image_size], return_transform=True)
        if bb_transform is None:
            transform_chain = rescale_transform
        else:
            transform_chain = mt.TransformChain(transforms=(rescale_transform, bb_transform))
    else:
        face_crop = face_crop.resize([image_size, image_size])

    if face_crop.n_channels == 4:
        face_crop.pixels = face_crop.pixels[:3, :, :]

    if normalize:
        face_crop.pixels = face_crop.rescale_pixels(0., 1.).pixels

    if return_transform:
        return face_crop, transform_chain
    else:
        return face_crop


def augment_face_image(img, image_size=256, crop_size=248, angle_range=30, flip=True):
    """basic image augmentation: random crop, rotation and horizontal flip"""

    # taken from MDM: https://github.com/trigeorgis/mdm
    def mirror_landmarks_68(lms, im_size):
        return PointCloud(abs(np.array([0, im_size[1]]) - lms.as_vector(
        ).reshape(-1, 2))[mirrored_parts_68])

    # taken from MDM: https://github.com/trigeorgis/mdm
    def mirror_image(im):
        im = im.copy()
        im.pixels = im.pixels[..., ::-1].copy()

        for group in im.landmarks:
            lms = im.landmarks[group]
            if lms.points.shape[0] == 68:
                im.landmarks[group] = mirror_landmarks_68(lms, im.shape)

        return im

    flip_rand = np.random.random() > 0.5
    # rot_rand = np.random.random() > 0.5
    # crop_rand = np.random.random() > 0.5
    rot_rand = True  # like ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
    crop_rand = True  # like ECT: https://github.com/HongwenZhang/ECT-FaceAlignment

    if crop_rand:
        lim = image_size - crop_size
        min_crop_inds = np.random.randint(0, lim, 2)
        max_crop_inds = min_crop_inds + crop_size
        img = img.crop(min_crop_inds, max_crop_inds)

    if flip and flip_rand:
        img = mirror_image(img)

    if rot_rand:
        rot_angle = 2 * angle_range * np.random.random_sample() - angle_range
        img = img.rotate_ccw_about_centre(rot_angle)

    img = img.resize([image_size, image_size])

    return img


def augment_menpo_img_ns(img, img_dir_ns, p_ns=0.):
    """texture style image augmentation using stylized copies in *img_dir_ns*"""

    img = img.copy()
    if p_ns > 0.5:
        ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '_ns*'))
        num_augs = len(ns_augs)
        if num_augs > 0:
            ns_ind = np.random.randint(0, num_augs)
            ns_aug = mio.import_image(ns_augs[ns_ind])
            ns_pixels = ns_aug.pixels
            img.pixels = ns_pixels
    return img


def augment_menpo_img_geom(img, p_geom=0.):
    """geometric style image augmentation using random face deformations"""

    img = img.copy()
    if p_geom > 0.5:
        grp_name = img.landmarks.group_labels[0]
        lms_geom_warp = deform_face_geometric_style(img.landmarks[grp_name].points.copy(), p_scale=p_geom, p_shift=p_geom)
        img = warp_face_image_tps(img, PointCloud(lms_geom_warp), grp_name)
    return img


def warp_face_image_tps(img, new_shape, lms_grp_name='PTS', warp_mode='constant'):
    """warp image to new landmarks using TPS interpolation"""

    tps = ThinPlateSplines(new_shape, img.landmarks[lms_grp_name])
    try:
        img_warp = img.warp_to_shape(img.shape, tps, mode=warp_mode)
        img_warp.landmarks[lms_grp_name] = new_shape
        return img_warp
    except np.linalg.LinAlgError as err:
        print('Error:' + str(err) + '\nUsing original landmarks for:\n' + str(img.path))
        return img


def load_menpo_image_list(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, return_transform=False):

    """load images from image dir to create menpo-type image list"""

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size,
                                  return_transform=return_transform)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size,
                                  return_transform=return_transform)

    def crop_to_face_image_test(img):
        return crop_to_face_image(img, bb_dictionary=None, margin=margin, image_size=image_size,
                                  return_transform=return_transform)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture))

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode == 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, 'training')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        # perform image augmentation
        if augment_texture and p_texture > 0:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom and p_geom > 0:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:  # if mode is 'TEST', load test data
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            img_set_dir = os.path.join(img_dir, test_data)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, test_data + '*')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            out_image_list = out_image_list.map(crop_to_face_image_test)

    return out_image_list
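The mirrored_parts_68 table above encodes how the 68 landmark labels swap when an image is flipped horizontally. A small self-contained check, reusing the file's own index definitions verbatim, confirms it is a true permutation and that left/right features trade places, which is exactly what mirror_landmarks_68 relies on (the 256-pixel image width below is hypothetical):

import numpy as np

jaw_indices = np.arange(0, 17)
lbrow_indices = np.arange(17, 22)
rbrow_indices = np.arange(22, 27)
upper_nose_indices = np.arange(27, 31)
lower_nose_indices = np.arange(31, 36)
leye_indices = np.arange(36, 42)
reye_indices = np.arange(42, 48)
outer_mouth_indices = np.arange(48, 60)
inner_mouth_indices = np.arange(60, 68)

mirrored_parts_68 = np.hstack([
    jaw_indices[::-1], rbrow_indices[::-1], lbrow_indices[::-1],
    upper_nose_indices, lower_nose_indices[::-1],
    np.roll(reye_indices[::-1], 4), np.roll(leye_indices[::-1], 4),
    np.roll(outer_mouth_indices[::-1], 7),
    np.roll(inner_mouth_indices[::-1], 5)
])

# every landmark index appears exactly once -> a valid permutation
assert sorted(mirrored_parts_68) == list(range(68))
# left/right features swap: the first left-brow slot now holds a right-brow point
assert mirrored_parts_68[17] in rbrow_indices

# flipping x-coordinates alone is not enough; the rows must be re-labeled too
lms = np.random.rand(68, 2) * 256           # (y, x) landmark array, menpo convention
flipped = np.abs(np.array([0, 256]) - lms)  # mirror x about the image width
flipped = flipped[mirrored_parts_68]        # re-label left <-> right points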
MakeItTalk/thirdparty/face_of_art/old/create_artistic_data_in_advance.ipynb
CHANGED
The diff for this file is too large to render.
See raw diff
MakeItTalk/thirdparty/face_of_art/old/deep_heatmaps_model_ect.py
CHANGED
@@ -1,544 +1,544 @@
(The file was likewise removed and re-added; old and new listings match as rendered, so the 544-line file appears once below.)

import scipy.io
import scipy.misc
from glob import glob
import os
import numpy as np
from image_utils import *
from ops import *
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import contrib


class DeepHeatmapsModel(object):

    """facial landmark localization network"""

    def __init__(self, mode='TRAIN', train_iter=500000, learning_rate=0.000001, image_size=256, c_dim=3, batch_size=10,
                 num_landmarks=68, img_path='data', save_log_path='logs', save_sample_path='sample',
                 save_model_path='model', test_model_path='model/deep_heatmaps-1000'):

        self.mode = mode
        self.train_iter = train_iter
        self.learning_rate = learning_rate

        self.image_size = image_size
        self.c_dim = c_dim
        self.batch_size = batch_size

        self.num_landmarks = num_landmarks

        self.save_log_path = save_log_path
        self.save_sample_path = save_sample_path
        self.save_model_path = save_model_path
        self.test_model_path = test_model_path
        self.img_path = img_path

        self.momentum = 0.95
        self.step = 20000  # for lr decay
        self.gamma = 0.05  # for lr decay

        self.weight_initializer = 'random_normal'  # random_normal or xavier
        self.weight_initializer_std = 0.01
        self.bias_initializer = 0.0

        self.l_weight_primary = 100.
        self.l_weight_fusion = 3. * self.l_weight_primary

        self.sigma = 6  # sigma for heatmap generation
        self.scale = 'zero_center'  # scale for image normalization: '255' / '1' / 'zero_center'

        self.print_every = 2
        self.save_every = 100
        self.sample_every_epoch = False
        self.sample_every = 10
        self.sample_grid = 4
        self.log_every_epoch = 1
        self.log_histograms = True

        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True

        bb_dir = '/Users/arik/Desktop/DATA/face_data/300W/Bounding_Boxes/'
        test_data = 'full'  # if mode is TEST, this chooses the set to use: full/common/challenging/test
        margin = 0.25  # for face crops
        bb_type = 'gt'  # gt/init

        self.bb_dictionary = load_bb_dictionary(bb_dir, mode, test_data=test_data)

        self.img_menpo_list = load_menpo_image_list(img_path, mode, self.bb_dictionary, image_size,
                                                    margin=margin, bb_type=bb_type, test_data=test_data)

        if mode == 'TRAIN':
            train_params = locals()
            print_training_params_to_file(train_params)

    def add_placeholders(self):

        if self.mode == 'TEST':
            self.test_images = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'images')
            # self.test_landmarks = tf.placeholder(tf.float32, [None, self.num_landmarks * 2], 'landmarks')

            self.test_heatmaps = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, self.num_landmarks], 'heatmaps')

            self.test_heatmaps_small = tf.placeholder(
                tf.float32, [None, self.image_size / 4, self.image_size / 4, self.num_landmarks], 'heatmaps_small')

        elif self.mode == 'TRAIN':
            self.train_images = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'train_images')
            # self.train_landmarks = tf.placeholder(tf.float32, [None, self.num_landmarks*2], 'train_landmarks')

            self.train_heatmaps = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, self.num_landmarks], 'train_heatmaps')

            self.train_heatmaps_small = tf.placeholder(
                tf.float32, [None, self.image_size / 4, self.image_size / 4, self.num_landmarks], 'train_heatmaps_small')

            # self.valid_images = tf.placeholder(
            #     tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'valid_images')
            # # self.valid_landmarks = tf.placeholder(tf.float32, [None, self.num_landmarks * 2], 'valid_landmarks')
            #
            # self.valid_heatmaps = tf.placeholder(
            #     tf.float32, [None, self.image_size, self.image_size, self.num_landmarks], 'valid_heatmaps')
            #
            # self.valid_heatmaps_small = tf.placeholder(
            #     tf.float32, [None, self.image_size / 4, self.image_size / 4, self.num_landmarks], 'valid_heatmaps_small')

    def heatmaps_network(self, input_images, reuse=None, name='pred_heatmaps'):

        with tf.name_scope(name):

            # if training is None:
            #     if self.mode == 'train':
            #         training = True
            #     else:
            #         training = False

            if self.weight_initializer == 'xavier':
                weight_initializer = contrib.layers.xavier_initializer()
            else:
                weight_initializer = tf.random_normal_initializer(stddev=self.weight_initializer_std)

            bias_init = tf.constant_initializer(self.bias_initializer)

            with tf.variable_scope('heatmaps_network'):
                with tf.name_scope('primary_net'):

                    l1 = conv_relu_pool(input_images, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
                                        reuse=reuse, var_scope='conv_1')
                    l2 = conv_relu_pool(l1, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
                                        reuse=reuse, var_scope='conv_2')
                    l3 = conv_relu(l2, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
                                   reuse=reuse, var_scope='conv_3')

                    l4_1 = conv_relu(l3, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_1')
                    l4_2 = conv_relu(l3, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_2')
                    l4_3 = conv_relu(l3, 3, 128, conv_dilation=3, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_3')
                    l4_4 = conv_relu(l3, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_4')

                    l4 = tf.concat([l4_1, l4_2, l4_3, l4_4], 3, name='conv_4')

                    l5_1 = conv_relu(l4, 3, 256, conv_dilation=1, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_1')
                    l5_2 = conv_relu(l4, 3, 256, conv_dilation=2, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_2')
                    l5_3 = conv_relu(l4, 3, 256, conv_dilation=3, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_3')
                    l5_4 = conv_relu(l4, 3, 256, conv_dilation=4, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_4')

                    l5 = tf.concat([l5_1, l5_2, l5_3, l5_4], 3, name='conv_5')

                    l6 = conv_relu(l5, 1, 512, conv_ker_init=weight_initializer,
                                   conv_bias_init=bias_init, reuse=reuse, var_scope='conv_6')
                    l7 = conv_relu(l6, 1, 256, conv_ker_init=weight_initializer,
                                   conv_bias_init=bias_init, reuse=reuse, var_scope='conv_7')
                    primary_out = conv(l7, 1, self.num_landmarks, conv_ker_init=weight_initializer,
                                       conv_bias_init=bias_init, reuse=reuse, var_scope='conv_8')

                with tf.name_scope('fusion_net'):

                    l_fsn_0 = tf.concat([l3, l7], 3, name='conv_3_7_fsn')

                    l_fsn_1_1 = conv_relu(l_fsn_0, 3, 64, conv_dilation=1, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_1_1')
                    l_fsn_1_2 = conv_relu(l_fsn_0, 3, 64, conv_dilation=2, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_1_2')
                    l_fsn_1_3 = conv_relu(l_fsn_0, 3, 64, conv_dilation=3, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_1_3')

                    l_fsn_1 = tf.concat([l_fsn_1_1, l_fsn_1_2, l_fsn_1_3], 3, name='conv_fsn_1')

                    l_fsn_2_1 = conv_relu(l_fsn_1, 3, 64, conv_dilation=1, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_1')
                    l_fsn_2_2 = conv_relu(l_fsn_1, 3, 64, conv_dilation=2, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_2')
                    l_fsn_2_3 = conv_relu(l_fsn_1, 3, 64, conv_dilation=4, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_3')
                    l_fsn_2_4 = conv_relu(l_fsn_1, 5, 64, conv_dilation=3, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_4')

                    l_fsn_2 = tf.concat([l_fsn_2_1, l_fsn_2_2, l_fsn_2_3, l_fsn_2_4], 3, name='conv_fsn_2')

                    l_fsn_3_1 = conv_relu(l_fsn_2, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_1')
                    l_fsn_3_2 = conv_relu(l_fsn_2, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_2')
                    l_fsn_3_3 = conv_relu(l_fsn_2, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_3')
                    l_fsn_3_4 = conv_relu(l_fsn_2, 5, 128, conv_dilation=3, conv_ker_init=weight_initializer,
                                          conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_4')

                    l_fsn_3 = tf.concat([l_fsn_3_1, l_fsn_3_2, l_fsn_3_3, l_fsn_3_4], 3, name='conv_fsn_3')

                    l_fsn_4 = conv_relu(l_fsn_3, 1, 256, conv_ker_init=weight_initializer,
                                        conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_4')
                    l_fsn_5 = conv(l_fsn_4, 1, self.num_landmarks, conv_ker_init=weight_initializer,
                                   conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_5')

                with tf.name_scope('upsample_net'):

                    out = deconv(l_fsn_5, 8, self.num_landmarks, conv_stride=4,
                                 conv_ker_init=deconv2d_bilinear_upsampling_initializer(
                                     [8, 8, self.num_landmarks, self.num_landmarks]), conv_bias_init=bias_init,
                                 reuse=reuse, var_scope='deconv_1')

                self.all_layers = [l1, l2, l3, l4, l5, l6, l7, primary_out, l_fsn_1, l_fsn_2, l_fsn_3, l_fsn_4,
                                   l_fsn_5, out]

                return primary_out, out

    def build_model(self):
        if self.mode == 'TEST':
            self.pred_hm_p, self.pred_hm_f = self.heatmaps_network(self.test_images)
        elif self.mode == 'TRAIN':
            self.pred_hm_p, self.pred_hm_f = self.heatmaps_network(self.train_images, name='pred_heatmaps_train')
            # self.pred_landmarks_valid = self.landmarks_network(self.valid_images, name='pred_landmarks_valid')
            # self.pred_landmarks_eval = self.landmarks_network(self.test_images, training=False, reuse=True, name='pred_landmarks_eval')
            # self.pred_landmarks_train = self.landmarks_network(self.train_images, reuse=True, name='pred_landmarks_train')

    def create_loss_ops(self):

        def l2_loss_norm_eyes(pred_landmarks, real_landmarks, normalize=True, name='l2_loss'):

            with tf.name_scope(name):
                with tf.name_scope('real_pred_landmarks_diff'):
                    landmarks_diff = pred_landmarks - real_landmarks

                if normalize:
                    with tf.name_scope('real_landmarks_eye_dist'):
                        with tf.name_scope('left_eye'):
                            p1_out = tf.slice(real_landmarks, [0, 72], [-1, 2])
                            p1_in = tf.slice(real_landmarks, [0, 78], [-1, 2])
                            p1 = (p1_in + p1_out) / 2
                        with tf.name_scope('right_eye'):
                            p2_out = tf.slice(real_landmarks, [0, 90], [-1, 2])
                            p2_in = tf.slice(real_landmarks, [0, 84], [-1, 2])
                            p2 = (p2_in + p2_out) / 2
                        eps = 1e-6
                        eye_dist = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(p1 - p2), axis=1)) + eps, axis=1)
                    norm_landmarks_diff = landmarks_diff / eye_dist
                    l2_landmarks_norm = tf.reduce_mean(tf.square(norm_landmarks_diff))

                    out = l2_landmarks_norm
                else:
                    l2_landmarks = tf.reduce_mean(tf.square(landmarks_diff))
                    out = l2_landmarks

                return out

        if self.mode == 'TRAIN':
            primary_maps_diff = self.pred_hm_p - self.train_heatmaps_small
            fusion_maps_diff = self.pred_hm_f - self.train_heatmaps

            self.l2_primary = tf.reduce_mean(tf.square(primary_maps_diff))
            self.l2_fusion = tf.reduce_mean(tf.square(fusion_maps_diff))

            self.total_loss = self.l_weight_primary * self.l2_primary + self.l_weight_fusion * self.l2_fusion

            # self.l2_loss_batch_train = l2_loss_norm_eyes(self.pred_landmarks_train, self.train_landmarks,
            #                                              self.normalize_loss_by_eyes, name='loss_train_batch')
            # with tf.name_scope('losses_not_for_train_step'):
            #     self.l2_loss_train = l2_loss_norm_eyes(self.pred_landmarks_train, self.train_landmarks,
            #                                            self.normalize_loss_by_eyes, name='train')
            #
            #     self.l2_loss_valid = l2_loss_norm_eyes(self.pred_landmarks_valid, self.valid_landmarks,
            #                                            self.normalize_loss_by_eyes, name='valid')
            # else:
            #     self.l2_loss_test = l2_loss_norm_eyes(self.pred_landmarks_eval, self.test_landmarks,
            #                                           self.normalize_loss_by_eyes)

    # def predict_landmarks_in_batches(self, image_paths, session):
    #
    #     num_batches = int(1.*len(image_paths)/self.batch_size)
    #     if num_batches == 0:
    #         batch_size = len(image_paths)
    #         num_batches = 1
    #     else:
    #         batch_size = self.batch_size
    #
    #     for i in range(num_batches):
    #         batch_image_paths = image_paths[i * batch_size:(i + 1) * batch_size]
    #         batch_images, _ = \
    #             load_data(batch_image_paths, None, self.image_size, self.num_landmarks, conv=True)
    #         if i == 0:
    #             all_pred_landmarks = session.run(self.pred_landmarks_eval, {self.test_images: batch_images})
    #         else:
    #             batch_pred = session.run(self.pred_landmarks_eval, {self.test_images: batch_images})
    #             all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred), 0)
    #
    #     reminder = len(image_paths)-num_batches*batch_size
    #     if reminder > 0:
    #         reminder_paths = image_paths[-reminder:]
    #         batch_images, _ = \
    #             load_data(reminder_paths, None, self.image_size, self.num_landmarks, conv=True)
    #         batch_pred = session.run(self.pred_landmarks_eval, {self.test_images: batch_images})
    #         all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred), 0)
    #
    #     return all_pred_landmarks

    def create_summary_ops(self):

        var_summary = [tf.summary.histogram(var.name, var) for var in tf.trainable_variables()]
        grads = tf.gradients(self.total_loss, tf.trainable_variables())
        grads = list(zip(grads, tf.trainable_variables()))
        grad_summary = [tf.summary.histogram(var.name + '/grads', grad) for grad, var in grads]
        activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers]
        l2_primary = tf.summary.scalar('l2_primary', self.l2_primary)
        l2_fusion = tf.summary.scalar('l2_fusion', self.l2_fusion)
        l_total = tf.summary.scalar('l_total', self.total_loss)

        if self.log_histograms:
            self.batch_summary_op = tf.summary.merge([l2_primary, l2_fusion, l_total, var_summary, grad_summary,
                                                      activ_summary])
        else:
            self.batch_summary_op = tf.summary.merge([l2_primary, l2_fusion, l_total])

        # l2_train_loss_summary = tf.summary.scalar('l2_loss_train', self.l2_loss_train)
        # l2_valid_loss_summary = tf.summary.scalar('l2_loss_valid', self.l2_loss_valid)
        #
        # self.epoch_summary_op = tf.summary.merge([l2_train_loss_summary, l2_valid_loss_summary])

    def eval(self):

        self.add_placeholders()
        # build model
        self.build_model()

        num_images = len(self.img_menpo_list)
        img_inds = np.arange(num_images)

        sample_iter = int(1. * num_images / self.sample_grid)

        # NOTE: max_test_sample is not initialized in __init__; callers must set it before eval()
        if self.max_test_sample is not None:
            if self.max_test_sample < sample_iter:
                sample_iter = self.max_test_sample

        with tf.Session(config=self.config) as sess:

            # load trained parameters
            print('loading test model...')
            saver = tf.train.Saver()
            saver.restore(sess, self.test_model_path)

            _, model_name = os.path.split(self.test_model_path)

            # if self.new_test_data is False:
            #     # create loss ops
            #     self.create_loss_ops()
            #
            #     all_test_pred_landmarks = self.predict_landmarks_in_batches(test_data_paths, session=sess)
            #     _, all_test_real_landmarks = load_data(None, test_landmarks_paths, self.image_size,
            #                                            self.num_landmarks, conv=True)
            #     all_test_loss = sess.run(self.l2_loss_test, {self.pred_landmarks_eval: all_test_pred_landmarks,
            #                                                  self.test_landmarks: all_test_real_landmarks})
            #     with open(os.path.join(self.save_log_path, model_name+'-test_loss.txt'), 'w') as f:
            #         f.write(str(all_test_loss))

            for i in range(sample_iter):

                batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid]

                batch_images, _, _, _ = \
                    load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
                              num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale,
                              save_landmarks=False)

                batch_maps_pred, batch_maps_small_pred = sess.run([self.pred_hm_f, self.pred_hm_p],
                                                                  {self.test_images: batch_images})

                sample_path_imgs = os.path.join(self.save_sample_path, model_name + '-sample-%d-to-%d-1.png' % (
                    i * self.sample_grid, (i + 1) * self.sample_grid))

                sample_path_maps = os.path.join(self.save_sample_path, model_name + '-sample-%d-to-%d-2.png' % (
                    i * self.sample_grid, (i + 1) * self.sample_grid))

                merged_img = merge_images_landmarks_maps(
                    batch_images, batch_maps_pred, image_size=self.image_size,
                    num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale)

                merged_map = merge_compare_maps(
                    batch_maps_small_pred, batch_maps_pred, image_size=self.image_size / 4,
                    num_landmarks=self.num_landmarks, num_samples=self.sample_grid)

                scipy.misc.imsave(sample_path_imgs, merged_img)
                scipy.misc.imsave(sample_path_maps, merged_map)

                print('saved %s' % sample_path_imgs)

    def train(self):
        tf.set_random_seed(1234)
        # build a graph
        # add placeholders
        self.add_placeholders()
        # build model
        self.build_model()
        # create loss ops
        self.create_loss_ops()
        # create summary ops
        self.create_summary_ops()

        # create optimizer and training op
        global_step = tf.Variable(0, trainable=False)
        lr = tf.train.exponential_decay(self.learning_rate, global_step, self.step, self.gamma, staircase=True)
        optimizer = tf.train.MomentumOptimizer(lr, self.momentum)

        train_op = optimizer.minimize(self.total_loss, global_step=global_step)

        with tf.Session(config=self.config) as sess:

            tf.global_variables_initializer().run()

            # create model saver and file writer
            summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print()
            print('*** Start Training ***')

            epoch = 0
            print_epoch = True

            num_train_images = len(self.img_menpo_list)
            num_train_images = 10  # debug override: trains on the first 10 images only
            img_inds = np.arange(num_train_images)
            np.random.shuffle(img_inds)

            for step in range(self.train_iter + 1):

                # get batch images
                j = step % int(float(num_train_images) / float(self.batch_size))

                if step > 0 and j == 0:
                    np.random.shuffle(img_inds)  # shuffle data if finished epoch
                    epoch += 1
                    print_epoch = True

                batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size]

                batch_images, batch_maps, batch_maps_small, _ = \
                    load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
                              num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale, save_landmarks=False)

                feed_dict_train = {self.train_images: batch_images, self.train_heatmaps: batch_maps,
                                   self.train_heatmaps_small: batch_maps_small}

                sess.run(train_op, feed_dict_train)

                # print loss every *log_every_epoch* epochs
                # if step == 0 or (step+1) == self.train_iter or (epoch % self.log_every_epoch == 0 and print_epoch):
                #     if self.sample_every_epoch is not True:
                #         print_epoch = False
                #     all_train_pred_landmarks = self.predict_landmarks_in_batches(train_data_paths, session=sess)
                #     _, all_train_real_landmarks = load_data(None, train_landmarks_paths, self.image_size,
                #                                             self.num_landmarks, conv=True)
                #     all_train_loss = sess.run(self.l2_loss_train, {self.pred_landmarks_train: all_train_pred_landmarks,
                #                                                    self.train_landmarks: all_train_real_landmarks})
                #
                #     all_valid_pred_landmarks = self.predict_landmarks_in_batches(valid_data_paths, session=sess)
                #     _, all_valid_real_landmarks = load_data(None, valid_landmarks_paths, self.image_size,
                #                                             self.num_landmarks, conv=True)
                #     all_valid_loss = sess.run(self.l2_loss_valid, {self.pred_landmarks_valid: all_valid_pred_landmarks,
                #                                                    self.valid_landmarks: all_valid_real_landmarks})
                #     print("--------- EPOCH %d ---------" % (epoch))
                #     print('step: [%d/%d] train loss: [%.6f] valid loss: [%.6f]'
                #           % (step + 1, self.train_iter, all_train_loss, all_valid_loss))
                #     print("----------------------------")
                #     summary = sess.run(self.epoch_summary_op, {self.l2_loss_valid: all_valid_loss, self.l2_loss_train: all_train_loss})
                #     summary_writer.add_summary(summary, epoch)

                # save to log and print status
                if step == 0 or (step + 1) % self.print_every == 0:

                    summary, l_p, l_f, l_t = sess.run(
                        [self.batch_summary_op, self.l2_primary, self.l2_fusion, self.total_loss],
                        feed_dict_train)

                    summary_writer.add_summary(summary, step)

                    print('epoch: [%d] step: [%d/%d] primary loss: [%.6f] fusion loss: [%.6f] total loss: [%.6f]'
                          % (epoch, step + 1, self.train_iter, l_p, l_f, l_t))

                # save model
                if (step + 1) % self.save_every == 0:
                    saver.save(sess, os.path.join(self.save_model_path, 'deep_heatmaps'), global_step=step + 1)
                    print('model/deep-heatmaps-%d saved' % (step + 1))

                # save images with landmarks
                if self.sample_every_epoch and (epoch % self.log_every_epoch == 0 and print_epoch):
                    print_epoch = False

                    # train_pred = sess.run(self.pred_landmarks_eval, {self.test_images: batch_images})
                    # valid_pred = sess.run(self.pred_landmarks_eval, {self.test_images: valid_images_sample})
                    #
                    # train_sample_path = os.path.join(self.save_sample_path, 'train-epoch-%d.png' % (epoch))
                    # valid_sample_path = os.path.join(self.save_sample_path, 'valid-epoch-%d.png' % (epoch))
                    #
                    # merge_images_train = merge_images_with_landmarks(batch_images, train_pred, self.image_size,
                    #                                                  self.num_landmarks, self.sample_grid)
                    # merge_images_valid = merge_images_with_landmarks(valid_images_sample, valid_pred,
                    #                                                  self.image_size, self.num_landmarks,
                    #                                                  self.sample_grid)
                    #
                    # scipy.misc.imsave(train_sample_path, merge_images_train)
                    # scipy.misc.imsave(valid_sample_path, merge_images_valid)

                elif (self.sample_every_epoch is False) and (step == 0 or (step + 1) % self.sample_every == 0):

                    batch_maps_pred, batch_maps_small_pred = sess.run([self.pred_hm_f, self.pred_hm_p],
                                                                      {self.train_images: batch_images})

                    print('map vals', batch_maps_pred.min(), batch_maps_pred.max())
                    print('small map vals', batch_maps_small_pred.min(), batch_maps_small_pred.max())

                    sample_path_imgs = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-1.png' % (epoch, step + 1))
                    sample_path_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-2.png' % (epoch, step + 1))

                    merged_img = merge_images_landmarks_maps(
                        batch_images, batch_maps_pred, image_size=self.image_size,
                        num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale)

                    merged_map = merge_compare_maps(
                        batch_maps_small_pred, batch_maps_pred, image_size=self.image_size / 4,
                        num_landmarks=self.num_landmarks, num_samples=self.sample_grid)

                    scipy.misc.imsave(sample_path_imgs, merged_img)
                    scipy.misc.imsave(sample_path_maps, merged_map)

            print('*** Finished Training ***')
            # evaluate model on test set
            # all_test_pred_landmarks = self.predict_landmarks_in_batches(test_data_paths, session=sess)
            # _, all_test_real_landmarks = load_data(None, test_landmarks_paths, self.image_size,
            #                                        self.num_landmarks, conv=True)
            # all_test_loss = sess.run(self.l2_loss_test, {self.pred_landmarks_test: all_test_pred_landmarks,
            #                                              self.test_landmarks: all_test_real_landmarks})
            #
            # print('step: [%d/%d] test loss: [%.6f]' % (step, self.train_iter, all_test_loss))
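One detail worth spelling out from the listing above: l2_loss_norm_eyes normalizes the squared landmark error by the inter-ocular distance, taking each eye centre as the mean of its outer and inner corner points (flat indices 72/78 and 84/90, i.e. points 36, 39, 42 and 45 of the flattened 68-point vector). A NumPy sketch of the same computation, with hypothetical random inputs:

import numpy as np

def l2_norm_eyes_np(pred, real, eps=1e-6):
    """mean squared landmark error, normalized by inter-ocular distance.
    pred/real: (batch, 136) flattened 68-point landmark vectors."""
    diff = pred - real
    p1 = (real[:, 72:74] + real[:, 78:80]) / 2   # left eye centre: points 36 & 39
    p2 = (real[:, 84:86] + real[:, 90:92]) / 2   # right eye centre: points 42 & 45
    eye_dist = np.sqrt(np.sum((p1 - p2) ** 2, axis=1, keepdims=True)) + eps
    return np.mean((diff / eye_dist) ** 2)

pred = np.random.rand(4, 136) * 256
real = np.random.rand(4, 136) * 256
print(l2_norm_eyes_np(pred, real))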
|
178 |
+
|
179 |
+
l_fsn_2_1 = conv_relu(l_fsn_1, 3, 64, conv_dilation=1, conv_ker_init=weight_initializer,
|
180 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_1')
|
181 |
+
l_fsn_2_2 = conv_relu(l_fsn_1, 3, 64, conv_dilation=2, conv_ker_init=weight_initializer,
|
182 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_2')
|
183 |
+
l_fsn_2_3 = conv_relu(l_fsn_1, 3, 64, conv_dilation=4, conv_ker_init=weight_initializer,
|
184 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_3')
|
185 |
+
l_fsn_2_4 = conv_relu(l_fsn_1, 5, 64, conv_dilation=3, conv_ker_init=weight_initializer,
|
186 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_4')
|
187 |
+
|
188 |
+
l_fsn_2 = tf.concat([l_fsn_2_1, l_fsn_2_2, l_fsn_2_3, l_fsn_2_4], 3, name='conv_fsn_2')
|
189 |
+
|
190 |
+
l_fsn_3_1 = conv_relu(l_fsn_2, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer,
|
191 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_1')
|
192 |
+
l_fsn_3_2 = conv_relu(l_fsn_2, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer,
|
193 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_2')
|
194 |
+
l_fsn_3_3 = conv_relu(l_fsn_2, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer,
|
195 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_3')
|
196 |
+
l_fsn_3_4 = conv_relu(l_fsn_2, 5, 128, conv_dilation=3, conv_ker_init=weight_initializer,
|
197 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_4')
|
198 |
+
|
199 |
+
l_fsn_3 = tf.concat([l_fsn_3_1, l_fsn_3_2, l_fsn_3_3, l_fsn_3_4], 3, name='conv_fsn_3')
|
200 |
+
|
201 |
+
l_fsn_4 = conv_relu(l_fsn_3, 1, 256, conv_ker_init=weight_initializer,
|
202 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_4')
|
203 |
+
l_fsn_5 = conv(l_fsn_4, 1, self.num_landmarks, conv_ker_init=weight_initializer,
|
204 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_5')
|
205 |
+
|
206 |
+
with tf.name_scope('upsample_net'):
|
207 |
+
|
208 |
+
out = deconv(l_fsn_5, 8, self.num_landmarks, conv_stride=4,
|
209 |
+
conv_ker_init=deconv2d_bilinear_upsampling_initializer(
|
210 |
+
[8, 8, self.num_landmarks, self.num_landmarks]), conv_bias_init=bias_init,
|
211 |
+
reuse=reuse, var_scope='deconv_1')
|
212 |
+
|
213 |
+
self.all_layers = [l1, l2, l3, l4, l5, l6, l7, primary_out, l_fsn_1, l_fsn_2, l_fsn_3, l_fsn_4,
|
214 |
+
l_fsn_5, out]
|
215 |
+
|
216 |
+
return primary_out, out
|
217 |
+
|
218 |
+
def build_model(self):
|
219 |
+
if self.mode == 'TEST':
|
220 |
+
self.pred_hm_p, self.pred_hm_f = self.heatmaps_network(self.test_images)
|
221 |
+
elif self.mode == 'TRAIN':
|
222 |
+
self.pred_hm_p,self.pred_hm_f = self.heatmaps_network(self.train_images,name='pred_heatmaps_train')
|
223 |
+
# self.pred_landmarks_valid = self.landmarks_network(self.valid_images,name='pred_landmarks_valid')
|
224 |
+
# self.pred_landmarks_eval = self.landmarks_network(self.test_images,training=False,reuse=True,name='pred_landmarks_eval')
|
225 |
+
# self.pred_landmarks_train = self.landmarks_network(self.train_images, reuse=True, name='pred_landmarks_train')
|
226 |
+
|
227 |
+
def create_loss_ops(self):
|
228 |
+
|
229 |
+
def l2_loss_norm_eyes(pred_landmarks, real_landmarks, normalize=True, name='l2_loss'):
|
230 |
+
|
231 |
+
with tf.name_scope(name):
|
232 |
+
with tf.name_scope('real_pred_landmarks_diff'):
|
233 |
+
landmarks_diff = pred_landmarks - real_landmarks
|
234 |
+
|
235 |
+
if normalize:
|
236 |
+
with tf.name_scope('real_landmarks_eye_dist'):
|
237 |
+
with tf.name_scope('left_eye'):
|
238 |
+
p1_out = tf.slice(real_landmarks, [0, 72], [-1, 2])
|
239 |
+
p1_in = tf.slice(real_landmarks, [0, 78], [-1, 2])
|
240 |
+
p1 = (p1_in + p1_out) / 2
|
241 |
+
with tf.name_scope('right_eye'):
|
242 |
+
p2_out = tf.slice(real_landmarks, [0, 90], [-1, 2])
|
243 |
+
p2_in = tf.slice(real_landmarks, [0, 84], [-1, 2])
|
244 |
+
p2 = (p2_in + p2_out) / 2
|
245 |
+
eps = 1e-6
|
246 |
+
eye_dist = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(p1 - p2), axis=1)) + eps, axis=1)
|
247 |
+
norm_landmarks_diff = landmarks_diff / eye_dist
|
248 |
+
l2_landmarks_norm = tf.reduce_mean(tf.square(norm_landmarks_diff))
|
249 |
+
|
250 |
+
out = l2_landmarks_norm
|
251 |
+
else:
|
252 |
+
l2_landmarks = tf.reduce_mean(tf.square(landmarks_diff))
|
253 |
+
out = l2_landmarks
|
254 |
+
|
255 |
+
return out
|
256 |
+
|
257 |
+
if self.mode is 'TRAIN':
|
258 |
+
primary_maps_diff = self.pred_hm_p-self.train_heatmaps_small
|
259 |
+
fusion_maps_diff = self.pred_hm_f - self.train_heatmaps
|
260 |
+
|
261 |
+
self.l2_primary = tf.reduce_mean(tf.square(primary_maps_diff))
|
262 |
+
self.l2_fusion = tf.reduce_mean(tf.square(fusion_maps_diff))
|
263 |
+
|
264 |
+
self.total_loss = self.l_weight_primary * self.l2_primary + self.l_weight_fusion * self.l2_fusion
|
265 |
+
|
266 |
+
# self.l2_loss_batch_train = l2_loss_norm_eyes(self.pred_landmarks_train, self.train_landmarks,
|
267 |
+
# self.normalize_loss_by_eyes, name='loss_train_batch')
|
268 |
+
# with tf.name_scope('losses_not_for_train_step'):
|
269 |
+
# self.l2_loss_train = l2_loss_norm_eyes(self.pred_landmarks_train, self.train_landmarks,
|
270 |
+
# self.normalize_loss_by_eyes, name='train')
|
271 |
+
#
|
272 |
+
# self.l2_loss_valid = l2_loss_norm_eyes(self.pred_landmarks_valid, self.valid_landmarks,
|
273 |
+
# self.normalize_loss_by_eyes, name='valid')
|
274 |
+
# else:
|
275 |
+
# self.l2_loss_test = l2_loss_norm_eyes(self.pred_landmarks_eval, self.test_landmarks,
|
276 |
+
# self.normalize_loss_by_eyes)
|
277 |
+
|
278 |
+
# def predict_landmarks_in_batches(self,image_paths,session):
|
279 |
+
#
|
280 |
+
# num_batches = int(1.*len(image_paths)/self.batch_size)
|
281 |
+
# if num_batches == 0:
|
282 |
+
# batch_size = len(image_paths)
|
283 |
+
# num_batches = 1
|
284 |
+
# else:
|
285 |
+
# batch_size = self.batch_size
|
286 |
+
#
|
287 |
+
# for i in range(num_batches):
|
288 |
+
# batch_image_paths = image_paths[i * batch_size:(i + 1) * batch_size]
|
289 |
+
# batch_images, _ = \
|
290 |
+
# load_data(batch_image_paths, None, self.image_size, self.num_landmarks, conv=True)
|
291 |
+
# if i == 0:
|
292 |
+
# all_pred_landmarks = session.run(self.pred_landmarks_eval,{self.test_images:batch_images})
|
293 |
+
# else:
|
294 |
+
# batch_pred = session.run(self.pred_landmarks_eval,{self.test_images:batch_images})
|
295 |
+
# all_pred_landmarks = np.concatenate((all_pred_landmarks,batch_pred),0)
|
296 |
+
#
|
297 |
+
# reminder = len(image_paths)-num_batches*batch_size
|
298 |
+
# if reminder >0:
|
299 |
+
# reminder_paths = image_paths[-reminder:]
|
300 |
+
# batch_images, _ = \
|
301 |
+
# load_data(reminder_paths, None, self.image_size, self.num_landmarks, conv=True)
|
302 |
+
# batch_pred = session.run(self.pred_landmarks_eval,{self.test_images:batch_images})
|
303 |
+
# all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred), 0)
|
304 |
+
#
|
305 |
+
# return all_pred_landmarks
|
306 |
+
|
307 |
+
def create_summary_ops(self):
|
308 |
+
|
309 |
+
var_summary = [tf.summary.histogram(var.name,var) for var in tf.trainable_variables()]
|
310 |
+
grads = tf.gradients(self.total_loss, tf.trainable_variables())
|
311 |
+
grads = list(zip(grads, tf.trainable_variables()))
|
312 |
+
grad_summary = [tf.summary.histogram(var.name+'/grads',grad) for grad,var in grads]
|
313 |
+
activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers]
|
314 |
+
l2_primary = tf.summary.scalar('l2_primary', self.l2_primary)
|
315 |
+
l2_fusion = tf.summary.scalar('l2_fusion', self.l2_fusion)
|
316 |
+
l_total = tf.summary.scalar('l_total', self.total_loss)
|
317 |
+
|
318 |
+
if self.log_histograms:
|
319 |
+
self.batch_summary_op = tf.summary.merge([l2_primary, l2_fusion, l_total, var_summary, grad_summary,
|
320 |
+
activ_summary])
|
321 |
+
else:
|
322 |
+
self.batch_summary_op = tf.summary.merge([l2_primary, l2_fusion, l_total])
|
323 |
+
|
324 |
+
# l2_train_loss_summary = tf.summary.scalar('l2_loss_train', self.l2_loss_train)
|
325 |
+
# l2_valid_loss_summary = tf.summary.scalar('l2_loss_valid', self.l2_loss_valid)
|
326 |
+
#
|
327 |
+
# self.epoch_summary_op = tf.summary.merge([l2_train_loss_summary, l2_valid_loss_summary])
|
328 |
+
|
329 |
+
def eval(self):
|
330 |
+
|
331 |
+
self.add_placeholders()
|
332 |
+
# build model
|
333 |
+
self.build_model()
|
334 |
+
|
335 |
+
num_images = len(self.img_menpo_list)
|
336 |
+
img_inds = np.arange(num_images)
|
+
+        sample_iter = int(1. * num_images / self.sample_grid)  # number of sample batches
+
+        if self.max_test_sample is not None:
+            if self.max_test_sample < sample_iter:
+                sample_iter = self.max_test_sample
+
+        with tf.Session(config=self.config) as sess:
+
+            # load trained parameters
+            print ('loading test model...')
+            saver = tf.train.Saver()
+            saver.restore(sess, self.test_model_path)
+
+            _, model_name = os.path.split(self.test_model_path)
+
+            # if self.new_test_data is False:
+            #     # create loss ops
+            #     self.create_loss_ops()
+            #
+            #     all_test_pred_landmarks = self.predict_landmarks_in_batches(test_data_paths, session=sess)
+            #     _, all_test_real_landmarks = load_data(None, test_landmarks_paths, self.image_size,
+            #                                            self.num_landmarks, conv=True)
+            #     all_test_loss = sess.run(self.l2_loss_test, {self.pred_landmarks_eval: all_test_pred_landmarks,
+            #                                                  self.test_landmarks: all_test_real_landmarks})
+            #     with open(os.path.join(self.save_log_path, model_name + '-test_loss.txt'), 'w') as f:
+            #         f.write(str(all_test_loss))
+
+            for i in range(sample_iter):
+
+                batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid]
+
+                batch_images, _, _, _ = \
+                    load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
+                              num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale,
+                              save_landmarks=False)
+
+                batch_maps_pred, batch_maps_small_pred = sess.run([self.pred_hm_f, self.pred_hm_p],
+                                                                  {self.test_images: batch_images})
+
+                sample_path_imgs = os.path.join(self.save_sample_path, model_name + '-sample-%d-to-%d-1.png' % (
+                    i * self.sample_grid, (i + 1) * self.sample_grid))
+
+                sample_path_maps = os.path.join(self.save_sample_path, model_name + '-sample-%d-to-%d-2.png' % (
+                    i * self.sample_grid, (i + 1) * self.sample_grid))
+
+                merged_img = merge_images_landmarks_maps(
+                    batch_images, batch_maps_pred, image_size=self.image_size,
+                    num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale)
+
+                merged_map = merge_compare_maps(
+                    batch_maps_small_pred, batch_maps_pred, image_size=self.image_size / 4,
+                    num_landmarks=self.num_landmarks, num_samples=self.sample_grid)
+
+                scipy.misc.imsave(sample_path_imgs, merged_img)
+                scipy.misc.imsave(sample_path_maps, merged_map)
+
+                print ('saved %s' % sample_path_imgs)
+
+    def train(self):
+        tf.set_random_seed(1234)
+        # build a graph
+        # add placeholders
+        self.add_placeholders()
+        # build model
+        self.build_model()
+        # create loss ops
+        self.create_loss_ops()
+        # create summary ops
+        self.create_summary_ops()
+
+        # create optimizer and training op
+        global_step = tf.Variable(0, trainable=False)
+        lr = tf.train.exponential_decay(self.learning_rate, global_step, self.step, self.gamma, staircase=True)
+        optimizer = tf.train.MomentumOptimizer(lr, self.momentum)
+
+        train_op = optimizer.minimize(self.total_loss, global_step=global_step)
+
+        with tf.Session(config=self.config) as sess:
+
+            tf.global_variables_initializer().run()
+
+            # create model saver and file writer
+            summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph())
+            saver = tf.train.Saver()
+
+            print
+            print('*** Start Training ***')
+
+            # set random seed
+            epoch = 0
+            print_epoch = True
+
+            num_train_images = len(self.img_menpo_list)
+            num_train_images = 10  # overrides the real dataset size; training uses only 10 images
+            img_inds = np.arange(num_train_images)
+            np.random.shuffle(img_inds)
+
+            for step in range(self.train_iter + 1):
+
+                # get batch images
+                j = step % int(float(num_train_images) / float(self.batch_size))
+
+                if step > 0 and j == 0:
+                    np.random.shuffle(img_inds)  # shuffle data if finished epoch
+                    epoch += 1
+                    print_epoch = True
+
+                batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size]
+
+                batch_images, batch_maps, batch_maps_small, _ = \
+                    load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
+                              num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale, save_landmarks=False)
+
+                feed_dict_train = {self.train_images: batch_images, self.train_heatmaps: batch_maps,
+                                   self.train_heatmaps_small: batch_maps_small}
+
+                sess.run(train_op, feed_dict_train)
+
+                # print loss every *log_every_epoch* epoch
+                # if step == 0 or (step + 1) == self.train_iter or (epoch % self.log_every_epoch == 0 and print_epoch):
+                #     if self.sample_every_epoch is not True:
+                #         print_epoch = False
+                #     all_train_pred_landmarks = self.predict_landmarks_in_batches(train_data_paths, session=sess)
+                #     _, all_train_real_landmarks = load_data(None, train_landmarks_paths, self.image_size,
+                #                                             self.num_landmarks, conv=True)
+                #     all_train_loss = sess.run(self.l2_loss_train, {self.pred_landmarks_train: all_train_pred_landmarks,
+                #                                                    self.train_landmarks: all_train_real_landmarks})
+                #
+                #     all_valid_pred_landmarks = self.predict_landmarks_in_batches(valid_data_paths, session=sess)
+                #     _, all_valid_real_landmarks = load_data(None, valid_landmarks_paths, self.image_size,
+                #                                             self.num_landmarks, conv=True)
+                #     all_valid_loss = sess.run(self.l2_loss_valid, {self.pred_landmarks_valid: all_valid_pred_landmarks,
+                #                                                    self.valid_landmarks: all_valid_real_landmarks})
+                #     print("--------- EPOCH %d ---------" % (epoch))
+                #     print ('step: [%d/%d] train loss: [%.6f] valid loss: [%.6f]'
+                #            % (step + 1, self.train_iter, all_train_loss, all_valid_loss))
+                #     print("----------------------------")
+                #     summary = sess.run(self.epoch_summary_op, {self.l2_loss_valid: all_valid_loss, self.l2_loss_train: all_train_loss})
+                #     summary_writer.add_summary(summary, epoch)
+
+                # save to log and print status
+                if step == 0 or (step + 1) % self.print_every == 0:
+
+                    summary, l_p, l_f, l_t = sess.run(
+                        [self.batch_summary_op, self.l2_primary, self.l2_fusion, self.total_loss],
+                        feed_dict_train)
+
+                    summary_writer.add_summary(summary, step)
+
+                    print ('epoch: [%d] step: [%d/%d] primary loss: [%.6f] fusion loss: [%.6f] total loss: [%.6f]'
+                           % (epoch, step + 1, self.train_iter, l_p, l_f, l_t))
+
+                # save model
+                if (step + 1) % self.save_every == 0:
+                    saver.save(sess, os.path.join(self.save_model_path, 'deep_heatmaps'), global_step=step + 1)
+                    print ('model/deep-heatmaps-%d saved' % (step + 1))
+
+                # save images with landmarks
+                if self.sample_every_epoch and (epoch % self.log_every_epoch == 0 and print_epoch):
+                    print_epoch = False
+
+                    # train_pred = sess.run(self.pred_landmarks_eval, {self.test_images: batch_images})
+                    # valid_pred = sess.run(self.pred_landmarks_eval, {self.test_images: valid_images_sample})
+                    #
+                    # train_sample_path = os.path.join(self.save_sample_path, 'train-epoch-%d.png' % (epoch))
+                    # valid_sample_path = os.path.join(self.save_sample_path, 'valid-epoch-%d.png' % (epoch))
+                    #
+                    # merge_images_train = merge_images_with_landmarks(batch_images, train_pred, self.image_size,
+                    #                                                  self.num_landmarks, self.sample_grid)
+                    # merge_images_valid = merge_images_with_landmarks(valid_images_sample, valid_pred,
+                    #                                                  self.image_size, self.num_landmarks,
+                    #                                                  self.sample_grid)
+                    #
+                    # scipy.misc.imsave(train_sample_path, merge_images_train)
+                    # scipy.misc.imsave(valid_sample_path, merge_images_valid)
+
+                elif (self.sample_every_epoch is False) and (step == 0 or (step + 1) % self.sample_every == 0):
+
+                    batch_maps_pred, batch_maps_small_pred = sess.run([self.pred_hm_f, self.pred_hm_p],
+                                                                      {self.train_images: batch_images})
+
+                    print 'map vals', batch_maps_pred.min(), batch_maps_pred.max()
+                    print 'small map vals', batch_maps_small_pred.min(), batch_maps_small_pred.max()
+
+                    sample_path_imgs = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-1.png' % (epoch, step + 1))
+                    sample_path_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-2.png' % (epoch, step + 1))
+
+                    merged_img = merge_images_landmarks_maps(
+                        batch_images, batch_maps_pred, image_size=self.image_size,
+                        num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale)
+
+                    merged_map = merge_compare_maps(
+                        batch_maps_small_pred, batch_maps_pred, image_size=self.image_size / 4,
+                        num_landmarks=self.num_landmarks, num_samples=self.sample_grid)
+
+                    scipy.misc.imsave(sample_path_imgs, merged_img)
+                    scipy.misc.imsave(sample_path_maps, merged_map)
+
+            print('*** Finished Training ***')
+            # evaluate model on test set
+            # all_test_pred_landmarks = self.predict_landmarks_in_batches(test_data_paths, session=sess)
+            # _, all_test_real_landmarks = load_data(None, test_landmarks_paths, self.image_size,
+            #                                        self.num_landmarks, conv=True)
+            # all_test_loss = sess.run(self.l2_loss_test, {self.pred_landmarks_test: all_test_pred_landmarks,
+            #                                              self.test_landmarks: all_test_real_landmarks})
+            #
+            # print ('step: [%d/%d] test loss: [%.6f]' % (step, self.train_iter, all_test_loss))
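
For reference, train() above schedules the learning rate with tf.train.exponential_decay(..., staircase=True), which multiplies the rate by gamma once every self.step optimizer updates. Below is a minimal standalone sketch of that schedule, not taken from the repo: gamma = 0.05 follows the constant set above, while the 80000-step decay interval and 1e-8 base rate are assumptions borrowed from the primary model's defaults shown further down.

def staircase_lr(base_lr, global_step, decay_step, gamma):
    # staircase schedule: lr = base_lr * gamma ** floor(global_step / decay_step)
    return base_lr * gamma ** (global_step // decay_step)

for s in (0, 79999, 80000, 160000):
    # with gamma = 0.05 the rate drops by a factor of 20 at each 80000-step boundary
    print(s, staircase_lr(1e-8, s, 80000, 0.05))
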
MakeItTalk/thirdparty/face_of_art/old/deep_heatmaps_model_primary.py
CHANGED
@@ -1,392 +1,392 @@
import scipy.io
import scipy.misc
from glob import glob
import os
import numpy as np
from image_utils import *
from ops import *
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import contrib


class DeepHeatmapsModel(object):

    """facial landmark localization Network"""

    def __init__(self, mode='TRAIN', train_iter=500000, learning_rate=1e-8, image_size=256, c_dim=3, batch_size=10,
                 num_landmarks=68, augment=True, img_path='data', save_log_path='logs', save_sample_path='sample',
                 save_model_path='model', test_model_path='model/deep_heatmaps_primary-1000'):

        self.mode = mode
        self.train_iter = train_iter
        self.learning_rate = learning_rate

        self.image_size = image_size
        self.c_dim = c_dim
        self.batch_size = batch_size

        self.num_landmarks = num_landmarks

        self.save_log_path = save_log_path
        self.save_sample_path = save_sample_path
        self.save_model_path = save_model_path
        self.test_model_path = test_model_path
        self.img_path = img_path

        self.momentum = 0.95
        self.step = 80000  # for lr decay
        self.gamma = 0.1  # for lr decay

        self.weight_initializer = 'xavier'  # random_normal or xavier
        self.weight_initializer_std = 0.01
        self.bias_initializer = 0.0

        self.sigma = 1.5  # sigma for heatmap generation
        self.scale = '1'  # scale for image normalization '255' / '1' / '0'

        self.print_every = 1
        self.save_every = 5000
        self.sample_every_epoch = False
        self.sample_every = 5
        self.sample_grid = 9
        self.log_every_epoch = 1
        self.log_histograms = True

        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True

        bb_dir = os.path.join(img_path, 'Bounding_Boxes')
        self.test_data = 'test'  # if mode is TEST, this chooses the set to use: full/common/challenging/test
        margin = 0.25  # for face crops
        bb_type = 'gt'  # gt/init

        self.debug = False
        self.debug_data_size = 20
        self.compute_nme = True

        self.bb_dictionary = load_bb_dictionary(bb_dir, mode, test_data=self.test_data)

        self.img_menpo_list = load_menpo_image_list(img_path, mode, self.bb_dictionary, image_size, augment=augment,
                                                    margin=margin, bb_type=bb_type, test_data=self.test_data)

        if mode is 'TRAIN':
            train_params = locals()
            print_training_params_to_file(train_params)

    def add_placeholders(self):

        if self.mode == 'TEST':
            self.test_images = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'images')

            self.test_heatmaps_small = tf.placeholder(
                tf.float32, [None, self.image_size / 4, self.image_size / 4, self.num_landmarks], 'heatmaps_small')

        elif self.mode == 'TRAIN':
            self.train_images = tf.placeholder(
                tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'train_images')

            self.train_heatmaps_small = tf.placeholder(
                tf.float32, [None, self.image_size / 4, self.image_size / 4, self.num_landmarks], 'train_heatmaps_small')

            if self.compute_nme:
                self.train_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'train_lms_small')
                self.pred_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'pred_lms_small')


    def heatmaps_network(self, input_images, reuse=None, name='pred_heatmaps'):

        with tf.name_scope(name):

            if self.weight_initializer == 'xavier':
                weight_initializer = contrib.layers.xavier_initializer()
            else:
                weight_initializer = tf.random_normal_initializer(stddev=self.weight_initializer_std)

            bias_init = tf.constant_initializer(self.bias_initializer)

            with tf.variable_scope('heatmaps_network'):
                with tf.name_scope('primary_net'):

                    l1 = conv_relu_pool(input_images, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
                                        reuse=reuse, var_scope='conv_1')
                    l2 = conv_relu_pool(l1, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
                                        reuse=reuse, var_scope='conv_2')
                    l3 = conv_relu(l2, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
                                   reuse=reuse, var_scope='conv_3')

                    l4_1 = conv_relu(l3, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_1')
                    l4_2 = conv_relu(l3, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_2')
                    l4_3 = conv_relu(l3, 3, 128, conv_dilation=3, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_3')
                    l4_4 = conv_relu(l3, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_4')

                    l4 = tf.concat([l4_1, l4_2, l4_3, l4_4], 3, name='conv_4')

                    l5_1 = conv_relu(l4, 3, 256, conv_dilation=1, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_1')
                    l5_2 = conv_relu(l4, 3, 256, conv_dilation=2, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_2')
                    l5_3 = conv_relu(l4, 3, 256, conv_dilation=3, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_3')
                    l5_4 = conv_relu(l4, 3, 256, conv_dilation=4, conv_ker_init=weight_initializer,
                                     conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_4')

                    l5 = tf.concat([l5_1, l5_2, l5_3, l5_4], 3, name='conv_5')

                    l6 = conv_relu(l5, 1, 512, conv_ker_init=weight_initializer,
                                   conv_bias_init=bias_init, reuse=reuse, var_scope='conv_6')
                    l7 = conv_relu(l6, 1, 256, conv_ker_init=weight_initializer,
                                   conv_bias_init=bias_init, reuse=reuse, var_scope='conv_7')
                    primary_out = conv(l7, 1, self.num_landmarks, conv_ker_init=weight_initializer,
                                       conv_bias_init=bias_init, reuse=reuse, var_scope='conv_8')

            self.all_layers = [l1, l2, l3, l4, l5, l6, l7, primary_out]

            return primary_out

    def build_model(self):
        if self.mode == 'TEST':
            self.pred_hm_p = self.heatmaps_network(self.test_images)
        elif self.mode == 'TRAIN':
            self.pred_hm_p = self.heatmaps_network(self.train_images, name='pred_heatmaps_train')

    def create_loss_ops(self):

        def l2_loss_norm_eyes(pred_landmarks, real_landmarks, normalize=True, name='NME_loss'):

            with tf.name_scope(name):
                with tf.name_scope('real_pred_landmarks_diff'):
                    landmarks_diff = pred_landmarks - real_landmarks

                if normalize:
                    with tf.name_scope('inter_pupil_dist'):
                        with tf.name_scope('left_eye'):
                            p1 = tf.reduce_mean(tf.slice(real_landmarks, [0, 42, 0], [-1, 6, 2]), axis=1)
                        with tf.name_scope('right_eye'):
                            p2 = tf.reduce_mean(tf.slice(real_landmarks, [0, 36, 0], [-1, 6, 2]), axis=1)
                        eps = 1e-6
                        eye_dist = tf.expand_dims(tf.expand_dims(
                            tf.sqrt(tf.reduce_sum(tf.square(p1 - p2), axis=1)) + eps, axis=1), axis=1)

                    norm_landmarks_diff = landmarks_diff / eye_dist
                    l2_landmarks_norm = tf.reduce_mean(tf.square(norm_landmarks_diff))

                    out = l2_landmarks_norm
                else:
                    l2_landmarks = tf.reduce_mean(tf.square(landmarks_diff))
                    out = l2_landmarks

                return out

        if self.mode is 'TRAIN':
            primary_maps_diff = self.pred_hm_p - self.train_heatmaps_small
            self.total_loss = 1000. * tf.reduce_mean(tf.square(primary_maps_diff))
            # self.total_loss = self.l2_primary

            if self.compute_nme:
                self.nme_loss = l2_loss_norm_eyes(self.pred_lms_small, self.train_lms_small)
            else:
                self.nme_loss = tf.constant(0.)

    def create_summary_ops(self):

        var_summary = [tf.summary.histogram(var.name, var) for var in tf.trainable_variables()]
        grads = tf.gradients(self.total_loss, tf.trainable_variables())
        grads = list(zip(grads, tf.trainable_variables()))
        grad_summary = [tf.summary.histogram(var.name + '/grads', grad) for grad, var in grads]
        activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers]
        l_total = tf.summary.scalar('l_total', self.total_loss)
        l_nme = tf.summary.scalar('l_nme', self.nme_loss)

        if self.log_histograms:
            self.batch_summary_op = tf.summary.merge([l_total, l_nme, var_summary, grad_summary,
                                                      activ_summary])
        else:
            self.batch_summary_op = tf.summary.merge([l_total, l_nme])

    def eval(self):

        self.add_placeholders()
        # build model
        self.build_model()

        num_images = len(self.img_menpo_list)
        img_inds = np.arange(num_images)

        sample_iter = int(1. * num_images / self.sample_grid)

        with tf.Session(config=self.config) as sess:

            # load trained parameters
            print ('loading test model...')
            saver = tf.train.Saver()
            saver.restore(sess, self.test_model_path)

            _, model_name = os.path.split(self.test_model_path)

            for i in range(sample_iter):

                batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid]

                batch_images, _, batch_maps_gt, _ = \
                    load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
                              num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale,
                              save_landmarks=False, primary=True)

                batch_maps_small_pred = sess.run(self.pred_hm_p, {self.test_images: batch_images})

                sample_path_imgs = os.path.join(self.save_sample_path, model_name + '-' + self.test_data + '-sample-%d-to-%d-1.png' % (
                    i * self.sample_grid, (i + 1) * self.sample_grid))

                sample_path_maps = os.path.join(self.save_sample_path, model_name + '-' + self.test_data + '-sample-%d-to-%d-2.png' % (
                    i * self.sample_grid, (i + 1) * self.sample_grid))

                sample_path_channels = os.path.join(self.save_sample_path, model_name + '-' + self.test_data + '-sample-%d-to-%d-3.png' % (
                    i * self.sample_grid, (i + 1) * self.sample_grid))

                merged_img = merge_images_landmarks_maps(
                    batch_images, batch_maps_small_pred, image_size=self.image_size,
                    num_landmarks=self.num_landmarks, num_samples=self.sample_grid,
                    scale=self.scale, circle_size=0)

                merged_map = merge_compare_maps(
                    batch_maps_gt, batch_maps_small_pred, image_size=self.image_size / 4,
                    num_landmarks=self.num_landmarks, num_samples=self.sample_grid)

                map_per_channel = map_comapre_channels(
                    batch_images, batch_maps_small_pred, batch_maps_gt, image_size=self.image_size / 4,
                    num_landmarks=self.num_landmarks, scale=self.scale)

                scipy.misc.imsave(sample_path_imgs, merged_img)
                scipy.misc.imsave(sample_path_maps, merged_map)
                scipy.misc.imsave(sample_path_channels, map_per_channel)

                print ('saved %s' % sample_path_imgs)

    def train(self):
        tf.set_random_seed(1234)
        np.random.seed(1234)
        # build a graph
        # add placeholders
        self.add_placeholders()
        # build model
        self.build_model()
        # create loss ops
        self.create_loss_ops()
        # create summary ops
        self.create_summary_ops()

        # create optimizer and training op
        global_step = tf.Variable(0, trainable=False)
        lr = tf.train.exponential_decay(self.learning_rate, global_step, self.step, self.gamma, staircase=True)
        optimizer = tf.train.MomentumOptimizer(lr, self.momentum)

        train_op = optimizer.minimize(self.total_loss, global_step=global_step)

        with tf.Session(config=self.config) as sess:

            tf.global_variables_initializer().run()

            # create model saver and file writer
            summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print
            print('*** Start Training ***')

            # set random seed
            epoch = 0

            num_train_images = len(self.img_menpo_list)
            if self.debug:
                num_train_images = self.debug_data_size

            img_inds = np.arange(num_train_images)
            np.random.shuffle(img_inds)

            for step in range(self.train_iter + 1):

                # get batch images
                j = step % int(float(num_train_images) / float(self.batch_size))

                if step > 0 and j == 0:
                    np.random.shuffle(img_inds)  # shuffle data if finished epoch
                    epoch += 1

                batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size]

                batch_images, _, batch_maps_small, batch_lms_small = \
                    load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
                              num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale,
                              save_landmarks=self.compute_nme, primary=True)

                feed_dict_train = {self.train_images: batch_images, self.train_heatmaps_small: batch_maps_small}

                sess.run(train_op, feed_dict_train)

                # save to log and print status
                if step == 0 or (step + 1) % self.print_every == 0:

                    if self.compute_nme:
                        batch_maps_small_pred = sess.run(self.pred_hm_p, {self.train_images: batch_images})
                        pred_lms_small = batch_heat_maps_to_image(
                            batch_maps_small_pred, self.batch_size, image_size=self.image_size / 4,
                            num_landmarks=self.num_landmarks)

                        feed_dict_log = {
                            self.train_images: batch_images, self.train_heatmaps_small: batch_maps_small,
                            self.train_lms_small: batch_lms_small, self.pred_lms_small: pred_lms_small}
                    else:
                        feed_dict_log = feed_dict_train

                    summary, l_t, l_nme = sess.run([self.batch_summary_op, self.total_loss, self.nme_loss],
                                                   feed_dict_log)

                    summary_writer.add_summary(summary, step)

                    print ('epoch: [%d] step: [%d/%d] primary loss: [%.6f] nme loss: [%.6f] ' % (
                        epoch, step + 1, self.train_iter, l_t, l_nme))

                # save model
                if (step + 1) % self.save_every == 0:
                    saver.save(sess, os.path.join(self.save_model_path, 'deep_heatmaps'), global_step=step + 1)
                    print ('model/deep-heatmaps-%d saved' % (step + 1))

                # save images with landmarks
                if (self.sample_every_epoch is False) and (step == 0 or (step + 1) % self.sample_every == 0):

                    if not self.compute_nme:
                        batch_maps_small_pred = sess.run(self.pred_hm_p, {self.train_images: batch_images})

                    print 'small map vals', batch_maps_small_pred.min(), batch_maps_small_pred.max()

                    sample_path_imgs = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-1.png'
                                                    % (epoch, step + 1))
                    sample_path_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-2.png'
                                                    % (epoch, step + 1))
                    sample_path_ch_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-3.png'
                                                       % (epoch, step + 1))

                    merged_img = merge_images_landmarks_maps(
                        batch_images, batch_maps_small_pred, image_size=self.image_size,
                        num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale,
                        circle_size=0)

                    merged_map = merge_compare_maps(
                        batch_maps_small_pred, batch_maps_small, image_size=self.image_size / 4,
                        num_landmarks=self.num_landmarks, num_samples=self.sample_grid)

                    map_per_channel = map_comapre_channels(batch_images, batch_maps_small_pred, batch_maps_small,
                                                           image_size=self.image_size / 4,
                                                           num_landmarks=self.num_landmarks, scale=self.scale)

                    scipy.misc.imsave(sample_path_imgs, merged_img)
                    scipy.misc.imsave(sample_path_maps, merged_map)
                    scipy.misc.imsave(sample_path_ch_maps, map_per_channel)

            print('*** Finished Training ***')
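
The l2_loss_norm_eyes helper above normalises the landmark error by the inter-pupil distance: each pupil centre is the mean of six eye landmarks of the 68-point annotation (indices 36-41 and 42-47, mirroring the slices in the TF code). A minimal NumPy sketch of the same computation, not taken from the repo:

import numpy as np

def nme(pred, real, eps=1e-6):
    # pred, real: (batch, 68, 2) landmark arrays
    p1 = real[:, 42:48].mean(axis=1)  # left-eye centre, landmarks 42-47
    p2 = real[:, 36:42].mean(axis=1)  # right-eye centre, landmarks 36-41
    eye_dist = np.sqrt(((p1 - p2) ** 2).sum(axis=1)) + eps  # inter-pupil distance per sample
    diff = (pred - real) / eye_dist[:, None, None]  # normalise the error per sample
    return np.mean(diff ** 2)  # mean squared normalised error, as in the TF version
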
|
|
1 |
+
import scipy.io
|
2 |
+
import scipy.misc
|
3 |
+
from glob import glob
|
4 |
+
import os
|
5 |
+
import numpy as np
|
6 |
+
from image_utils import *
|
7 |
+
from ops import *
|
8 |
+
from sklearn.model_selection import train_test_split
|
9 |
+
import tensorflow as tf
|
10 |
+
from tensorflow import contrib
|
11 |
+
|
12 |
+
|
13 |
+
class DeepHeatmapsModel(object):
|
14 |
+
|
15 |
+
"""facial landmark localization Network"""
|
16 |
+
|
17 |
+
def __init__(self, mode='TRAIN', train_iter=500000, learning_rate=1e-8, image_size=256, c_dim=3, batch_size=10,
|
18 |
+
num_landmarks=68, augment=True, img_path='data', save_log_path='logs', save_sample_path='sample',
|
19 |
+
save_model_path='model',test_model_path='model/deep_heatmaps_primary-1000'):
|
20 |
+
|
21 |
+
self.mode = mode
|
22 |
+
self.train_iter=train_iter
|
23 |
+
self.learning_rate=learning_rate
|
24 |
+
|
25 |
+
self.image_size = image_size
|
26 |
+
self.c_dim = c_dim
|
27 |
+
self.batch_size = batch_size
|
28 |
+
|
29 |
+
self.num_landmarks = num_landmarks
|
30 |
+
|
31 |
+
self.save_log_path=save_log_path
|
32 |
+
self.save_sample_path=save_sample_path
|
33 |
+
self.save_model_path=save_model_path
|
34 |
+
self.test_model_path=test_model_path
|
35 |
+
self.img_path=img_path
|
36 |
+
|
37 |
+
self.momentum = 0.95
|
38 |
+
self.step = 80000 # for lr decay
|
39 |
+
self.gamma = 0.1 # for lr decay
|
40 |
+
|
41 |
+
self.weight_initializer = 'xavier' # random_normal or xavier
|
42 |
+
self.weight_initializer_std = 0.01
|
43 |
+
self.bias_initializer = 0.0
|
44 |
+
|
45 |
+
self.sigma = 1.5 # sigma for heatmap generation
|
46 |
+
self.scale = '1' # scale for image normalization '255' / '1' / '0'
|
47 |
+
|
48 |
+
self.print_every=1
|
49 |
+
self.save_every=5000
|
50 |
+
self.sample_every_epoch = False
|
51 |
+
self.sample_every=5
|
52 |
+
self.sample_grid=9
|
53 |
+
self.log_every_epoch=1
|
54 |
+
self.log_histograms = True
|
55 |
+
|
56 |
+
self.config = tf.ConfigProto()
|
57 |
+
self.config.gpu_options.allow_growth = True
|
58 |
+
|
59 |
+
bb_dir = os.path.join(img_path,'Bounding_Boxes')
|
60 |
+
self.test_data ='test' # if mode is TEST, this choose the set to use full/common/challenging/test
|
61 |
+
margin = 0.25 # for face crops
|
62 |
+
bb_type = 'gt' # gt/init
|
63 |
+
|
64 |
+
self.debug = False
|
65 |
+
self.debug_data_size = 20
|
66 |
+
self.compute_nme = True
|
67 |
+
|
68 |
+
self.bb_dictionary = load_bb_dictionary(bb_dir, mode, test_data=self.test_data)
|
69 |
+
|
70 |
+
self.img_menpo_list = load_menpo_image_list(img_path, mode, self.bb_dictionary, image_size, augment=augment,
|
71 |
+
margin=margin, bb_type=bb_type, test_data=self.test_data)
|
72 |
+
|
73 |
+
if mode is 'TRAIN':
|
74 |
+
train_params = locals()
|
75 |
+
print_training_params_to_file(train_params)
|
76 |
+
|
77 |
+
def add_placeholders(self):
|
78 |
+
|
79 |
+
if self.mode == 'TEST':
|
80 |
+
self.test_images = tf.placeholder(
|
81 |
+
tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'images')
|
82 |
+
|
83 |
+
self.test_heatmaps_small = tf.placeholder(
|
84 |
+
tf.float32, [None, self.image_size/4, self.image_size/4, self.num_landmarks], 'heatmaps_small')
|
85 |
+
|
86 |
+
elif self.mode == 'TRAIN':
|
87 |
+
self.train_images = tf.placeholder(
|
88 |
+
tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'train_images')
|
89 |
+
|
90 |
+
self.train_heatmaps_small = tf.placeholder(
|
91 |
+
tf.float32, [None, self.image_size/4, self.image_size/4, self.num_landmarks], 'train_heatmaps_small')
|
92 |
+
|
93 |
+
if self.compute_nme:
|
94 |
+
self.train_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'train_lms_small')
|
95 |
+
self.pred_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'pred_lms_small')
|
96 |
+
|
97 |
+
|
98 |
+
def heatmaps_network(self, input_images, reuse=None, name='pred_heatmaps'):
|
99 |
+
|
100 |
+
with tf.name_scope(name):
|
101 |
+
|
102 |
+
if self.weight_initializer == 'xavier':
|
103 |
+
weight_initializer = contrib.layers.xavier_initializer()
|
104 |
+
else:
|
105 |
+
weight_initializer = tf.random_normal_initializer(stddev=self.weight_initializer_std)
|
106 |
+
|
107 |
+
bias_init = tf.constant_initializer(self.bias_initializer)
|
108 |
+
|
109 |
+
with tf.variable_scope('heatmaps_network'):
|
110 |
+
with tf.name_scope('primary_net'):
|
111 |
+
|
112 |
+
l1 = conv_relu_pool(input_images, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
|
113 |
+
reuse=reuse, var_scope='conv_1')
|
114 |
+
l2 = conv_relu_pool(l1, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
|
115 |
+
reuse=reuse, var_scope='conv_2')
|
116 |
+
l3 = conv_relu(l2, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,
|
117 |
+
reuse=reuse, var_scope='conv_3')
|
118 |
+
|
119 |
+
l4_1 = conv_relu(l3, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer,
|
120 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_1')
|
121 |
+
l4_2 = conv_relu(l3, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer,
|
122 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_2')
|
123 |
+
l4_3 = conv_relu(l3, 3, 128, conv_dilation=3, conv_ker_init=weight_initializer,
|
124 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_3')
|
125 |
+
l4_4 = conv_relu(l3, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer,
|
126 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_4')
|
127 |
+
|
128 |
+
l4 = tf.concat([l4_1, l4_2, l4_3, l4_4], 3, name='conv_4')
|
129 |
+
|
130 |
+
l5_1 = conv_relu(l4, 3, 256, conv_dilation=1, conv_ker_init=weight_initializer,
|
131 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_1')
|
132 |
+
l5_2 = conv_relu(l4, 3, 256, conv_dilation=2, conv_ker_init=weight_initializer,
|
133 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_2')
|
134 |
+
l5_3 = conv_relu(l4, 3, 256, conv_dilation=3, conv_ker_init=weight_initializer,
|
135 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_3')
|
136 |
+
l5_4 = conv_relu(l4, 3, 256, conv_dilation=4, conv_ker_init=weight_initializer,
|
137 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_4')
|
138 |
+
|
139 |
+
l5 = tf.concat([l5_1, l5_2, l5_3, l5_4], 3, name='conv_5')
|
140 |
+
|
141 |
+
l6 = conv_relu(l5, 1, 512, conv_ker_init=weight_initializer,
|
142 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_6')
|
143 |
+
l7 = conv_relu(l6, 1, 256, conv_ker_init=weight_initializer,
|
144 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_7')
|
145 |
+
primary_out = conv(l7, 1, self.num_landmarks, conv_ker_init=weight_initializer,
|
146 |
+
conv_bias_init=bias_init, reuse=reuse, var_scope='conv_8')
|
147 |
+
|
148 |
+
self.all_layers = [l1, l2, l3, l4, l5, l6, l7, primary_out]
|
149 |
+
|
150 |
+
return primary_out
|
151 |
+
|
152 |
+
def build_model(self):
|
153 |
+
if self.mode == 'TEST':
|
154 |
+
self.pred_hm_p = self.heatmaps_network(self.test_images)
|
155 |
+
elif self.mode == 'TRAIN':
|
156 |
+
self.pred_hm_p = self.heatmaps_network(self.train_images,name='pred_heatmaps_train')
|
157 |
+
|
158 |
+
def create_loss_ops(self):
|
159 |
+
|
160 |
+
def l2_loss_norm_eyes(pred_landmarks, real_landmarks, normalize=True, name='NME_loss'):
|
161 |
+
|
162 |
+
with tf.name_scope(name):
|
163 |
+
with tf.name_scope('real_pred_landmarks_diff'):
|
164 |
+
landmarks_diff = pred_landmarks - real_landmarks
|
165 |
+
|
166 |
+
if normalize:
|
167 |
+
with tf.name_scope('inter_pupil_dist'):
|
168 |
+
with tf.name_scope('left_eye'):
|
169 |
+
p1 = tf.reduce_mean(tf.slice(real_landmarks, [0, 42, 0], [-1, 6, 2]), axis=1)
|
170 |
+
with tf.name_scope('right_eye'):
|
171 |
+
p2 = tf.reduce_mean(tf.slice(real_landmarks, [0, 36, 0], [-1, 6, 2]), axis=1)
|
172 |
+
eps = 1e-6
|
173 |
+
eye_dist = tf.expand_dims(tf.expand_dims(
|
174 |
+
tf.sqrt(tf.reduce_sum(tf.square(p1 - p2), axis=1)) + eps, axis=1), axis=1)
|
175 |
+
|
176 |
+
norm_landmarks_diff = landmarks_diff / eye_dist
|
177 |
+
l2_landmarks_norm = tf.reduce_mean(tf.square(norm_landmarks_diff))
|
178 |
+
|
179 |
+
out = l2_landmarks_norm
|
180 |
+
else:
|
181 |
+
l2_landmarks = tf.reduce_mean(tf.square(landmarks_diff))
|
182 |
+
out = l2_landmarks
|
183 |
+
|
184 |
+
return out
|
185 |
+
|
186 |
+
if self.mode is 'TRAIN':
|
187 |
+
primary_maps_diff = self.pred_hm_p-self.train_heatmaps_small
|
188 |
+
self.total_loss = 1000.*tf.reduce_mean(tf.square(primary_maps_diff))
|
189 |
+
# self.total_loss = self.l2_primary
|
190 |
+
|
191 |
+
if self.compute_nme:
|
192 |
+
self.nme_loss = l2_loss_norm_eyes(self.pred_lms_small,self.train_lms_small)
|
193 |
+
else:
|
194 |
+
self.nme_loss = tf.constant(0.)
|
195 |
+
|
196 |
+
def create_summary_ops(self):
|
197 |
+
|
198 |
+
var_summary = [tf.summary.histogram(var.name,var) for var in tf.trainable_variables()]
|
199 |
+
grads = tf.gradients(self.total_loss, tf.trainable_variables())
|
200 |
+
grads = list(zip(grads, tf.trainable_variables()))
|
201 |
+
grad_summary = [tf.summary.histogram(var.name+'/grads',grad) for grad,var in grads]
|
202 |
+
activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers]
|
203 |
+
l_total = tf.summary.scalar('l_total', self.total_loss)
|
204 |
+
l_nme = tf.summary.scalar('l_nme', self.nme_loss)
|
205 |
+
|
206 |
+
if self.log_histograms:
|
207 |
+
self.batch_summary_op = tf.summary.merge([l_total, l_nme, var_summary, grad_summary,
|
208 |
+
activ_summary])
|
209 |
+
else:
|
210 |
+
self.batch_summary_op = tf.summary.merge([l_total, l_nme])
|
211 |
+
|
212 |
+
def eval(self):
|
213 |
+
|
214 |
+
self.add_placeholders()
|
215 |
+
# build model
|
216 |
+
self.build_model()
|
217 |
+
|
218 |
+
num_images = len(self.img_menpo_list)
|
219 |
+
img_inds = np.arange(num_images)
|
220 |
+
|
221 |
+
sample_iter = int(1. * num_images / self.sample_grid)
|
222 |
+
|
223 |
+
with tf.Session(config=self.config) as sess:
|
224 |
+
|
225 |
+
# load trained parameters
|
226 |
+
print ('loading test model...')
|
227 |
+
saver = tf.train.Saver()
|
228 |
+
saver.restore(sess, self.test_model_path)
|
229 |
+
|
230 |
+
_, model_name = os.path.split(self.test_model_path)
|
231 |
+
|
232 |
+
for i in range(sample_iter):
|
233 |
+
|
234 |
+
batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid]
|
235 |
+
|
236 |
+
batch_images, _, batch_maps_gt, _ = \
|
237 |
+
load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
|
238 |
+
num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale,
|
239 |
+
save_landmarks=False, primary=True)
|
240 |
+
|
241 |
+
batch_maps_small_pred = sess.run(self.pred_hm_p, {self.test_images: batch_images})
|
242 |
+
|
243 |
+
sample_path_imgs = os.path.join(self.save_sample_path, model_name +'-'+ self.test_data+'-sample-%d-to-%d-1.png' % (
|
244 |
+
i * self.sample_grid, (i + 1) * self.sample_grid))
|
245 |
+
|
246 |
+
sample_path_maps = os.path.join(self.save_sample_path, model_name +'-'+ self.test_data+ '-sample-%d-to-%d-2.png' % (
|
247 |
+
i * self.sample_grid, (i + 1) * self.sample_grid))
|
248 |
+
|
249 |
+
sample_path_channels = os.path.join(self.save_sample_path, model_name +'-'+ self.test_data+ '-sample-%d-to-%d-3.png' % (
|
250 |
+
i * self.sample_grid, (i + 1) * self.sample_grid))
|
251 |
+
|
252 |
+
merged_img = merge_images_landmarks_maps(
|
253 |
+
batch_images, batch_maps_small_pred, image_size=self.image_size,
|
254 |
+
num_landmarks=self.num_landmarks, num_samples=self.sample_grid,
|
255 |
+
scale=self.scale,circle_size=0)
|
256 |
+
|
257 |
+
merged_map = merge_compare_maps(
|
258 |
+
batch_maps_gt, batch_maps_small_pred,image_size=self.image_size/4,
|
259 |
+
num_landmarks=self.num_landmarks, num_samples=self.sample_grid)
|
260 |
+
|
261 |
+
map_per_channel = map_comapre_channels(
|
262 |
+
batch_images, batch_maps_small_pred,batch_maps_gt, image_size=self.image_size / 4,
|
263 |
+
num_landmarks=self.num_landmarks, scale=self.scale)
|
264 |
+
|
265 |
+
scipy.misc.imsave(sample_path_imgs, merged_img)
|
266 |
+
scipy.misc.imsave(sample_path_maps, merged_map)
|
267 |
+
scipy.misc.imsave(sample_path_channels, map_per_channel)
|
268 |
+
|
269 |
+
print ('saved %s' % sample_path_imgs)
|
270 |
+
|
271 |
+
def train(self):
|
272 |
+
tf.set_random_seed(1234)
|
273 |
+
np.random.seed(1234)
|
274 |
+
# build a graph
|
275 |
+
# add placeholders
|
276 |
+
self.add_placeholders()
|
277 |
+
# build model
|
278 |
+
self.build_model()
|
279 |
+
# create loss ops
|
280 |
+
self.create_loss_ops()
|
281 |
+
# create summary ops
|
282 |
+
self.create_summary_ops()
|
283 |
+
|
284 |
+
# create optimizer and training op
|
285 |
+
global_step = tf.Variable(0, trainable=False)
|
286 |
+
lr = tf.train.exponential_decay(self.learning_rate,global_step, self.step, self.gamma, staircase=True)
|
287 |
+
optimizer = tf.train.MomentumOptimizer(lr,self.momentum)
|
288 |
+
|
289 |
+
train_op = optimizer.minimize(self.total_loss,global_step=global_step)
|
290 |
+
|
291 |
+
with tf.Session(config=self.config) as sess:
|
292 |
+
|
293 |
+
tf.global_variables_initializer().run()
|
294 |
+
|
295 |
+
# create model saver and file writer
|
296 |
+
summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph())
|
297 |
+
saver = tf.train.Saver()
|
298 |
+
|
299 |
+
print()
print('*** Start Training ***')

# set random seed
epoch = 0

num_train_images = len(self.img_menpo_list)
if self.debug:
    num_train_images = self.debug_data_size

img_inds = np.arange(num_train_images)
np.random.shuffle(img_inds)

for step in range(self.train_iter + 1):

    # get batch images
    j = step % int(float(num_train_images) / float(self.batch_size))

    if step > 0 and j == 0:
        np.random.shuffle(img_inds)  # shuffle data if finished epoch
        epoch += 1

    batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size]

    batch_images, _, batch_maps_small, batch_lms_small = \
        load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim,
                  num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale,
                  save_landmarks=self.compute_nme, primary=True)

    feed_dict_train = {self.train_images: batch_images, self.train_heatmaps_small: batch_maps_small}

    sess.run(train_op, feed_dict_train)

    # save to log and print status
    if step == 0 or (step + 1) % self.print_every == 0:

        if self.compute_nme:
            batch_maps_small_pred = sess.run(self.pred_hm_p, {self.train_images: batch_images})
            pred_lms_small = batch_heat_maps_to_image(
                batch_maps_small_pred, self.batch_size, image_size=self.image_size / 4,
                num_landmarks=self.num_landmarks)

            feed_dict_log = {
                self.train_images: batch_images, self.train_heatmaps_small: batch_maps_small,
                self.train_lms_small: batch_lms_small, self.pred_lms_small: pred_lms_small}
        else:
            feed_dict_log = feed_dict_train

        summary, l_t, l_nme = sess.run([self.batch_summary_op, self.total_loss, self.nme_loss],
                                       feed_dict_log)

        summary_writer.add_summary(summary, step)

        print('epoch: [%d] step: [%d/%d] primary loss: [%.6f] nme loss: [%.6f]' % (
            epoch, step + 1, self.train_iter, l_t, l_nme))

    # save model
    if (step + 1) % self.save_every == 0:
        saver.save(sess, os.path.join(self.save_model_path, 'deep_heatmaps'), global_step=step + 1)
        print('model/deep-heatmaps-%d saved' % (step + 1))

    # save images with landmarks
    if (self.sample_every_epoch is False) and (step == 0 or (step + 1) % self.sample_every == 0):

        if not self.compute_nme:
            batch_maps_small_pred = sess.run(self.pred_hm_p, {self.train_images: batch_images})

        print('small map vals', batch_maps_small_pred.min(), batch_maps_small_pred.max())

        sample_path_imgs = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-1.png'
                                        % (epoch, step + 1))
        sample_path_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-2.png'
                                        % (epoch, step + 1))
        sample_path_ch_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-3.png'
                                           % (epoch, step + 1))

        merged_img = merge_images_landmarks_maps(
            batch_images, batch_maps_small_pred, image_size=self.image_size,
            num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale,
            circle_size=0)

        merged_map = merge_compare_maps(
            batch_maps_small_pred, batch_maps_small, image_size=self.image_size / 4,
            num_landmarks=self.num_landmarks, num_samples=self.sample_grid)

        map_per_channel = map_comapre_channels(batch_images, batch_maps_small_pred, batch_maps_small,
                                               image_size=self.image_size / 4,
                                               num_landmarks=self.num_landmarks, scale=self.scale)

        scipy.misc.imsave(sample_path_imgs, merged_img)
        scipy.misc.imsave(sample_path_maps, merged_map)
        scipy.misc.imsave(sample_path_ch_maps, map_per_channel)

print('*** Finished Training ***')
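For reference, the epoch/batch bookkeeping in the loop above can be exercised on its own. Below is a minimal sketch of the shuffled-index batching scheme, with made-up sizes (the values are illustrative, not part of the file):

    import numpy as np

    num_train_images, batch_size, train_iter = 10, 4, 7  # hypothetical values
    img_inds = np.arange(num_train_images)
    np.random.shuffle(img_inds)

    epoch = 0
    batches_per_epoch = int(float(num_train_images) / float(batch_size))  # floored: 2
    for step in range(train_iter + 1):
        j = step % batches_per_epoch
        if step > 0 and j == 0:
            np.random.shuffle(img_inds)  # reshuffle once an epoch's worth of batches is consumed
            epoch += 1
        batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]
        print(epoch, step, batch_inds)

Note that the epoch length is floored, so under this scheme the last num_train_images % batch_size images of each shuffle are never drawn.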
MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluate_and_compare_multiple_models.py
CHANGED
@@ -1,82 +1,82 @@
(Both sides of this diff are textually identical, so the change is most likely a line-ending normalization; the file content appears once below.)

from evaluation_functions import *
from glob import glob

flags = tf.app.flags

data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
models_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/ect_like/saved_models/test'

# define paths
flags.DEFINE_string('img_dir', data_dir, 'data directory')
flags.DEFINE_string('test_data', 'test', 'test set to use full/common/challenging/test/art')
flags.DEFINE_string('models_dir', models_dir, 'directory containing multiple models to evaluate and compare')

# parameters used to train network
flags.DEFINE_integer('image_size', 256, 'image size')
flags.DEFINE_integer('c_dim', 3, 'color channels')
flags.DEFINE_integer('num_landmarks', 68, 'number of face landmarks')
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt':for ground truth / 'init':for face detector output")

# choose batch size and debug data size
flags.DEFINE_integer('batch_size', 10, 'batch size')
flags.DEFINE_bool('debug', True, 'run in debug mode - use subset of the data')
flags.DEFINE_integer('debug_data_size', 50, 'subset data size to test in debug mode')

# statistics parameters
flags.DEFINE_float('max_error', 0.08, 'error threshold to be considered as failure')
flags.DEFINE_bool('save_log', True, 'save statistics to log_dir')
flags.DEFINE_string('log_path', 'logs/nme_statistics', 'directory for saving NME statistics')

FLAGS = flags.FLAGS


def main(_):

    # create directories if they do not exist
    if not tf.gfile.Exists(FLAGS.log_path):
        tf.gfile.MakeDirs(FLAGS.log_path)

    test_model_dirs = glob(os.path.join(FLAGS.models_dir, '*/'))

    model_names = []
    model_errors = []

    for i, model_dir in enumerate(test_model_dirs):

        model_name = model_dir.split('/')[-2]

        if 'primary' in model_name.lower():
            net_type = 'Primary'
        elif 'fusion' in model_name.lower():
            net_type = 'Fusion'
        else:
            sys.exit('\n*** Error: please give informative names for model directories, including network type! ***')

        model_path = glob(os.path.join(model_dir, '*meta'))[0].split('.meta')[0]

        print('\n##### EVALUATING MODELS (%d/%d) #####' % (i + 1, len(test_model_dirs)))

        tf.reset_default_graph()  # reset graph

        err = evaluate_heatmap_network(
            model_path=model_path, network_type=net_type, img_path=FLAGS.img_dir, test_data=FLAGS.test_data,
            batch_size=FLAGS.batch_size, image_size=FLAGS.image_size, margin=FLAGS.margin,
            bb_type=FLAGS.bb_type, c_dim=FLAGS.c_dim, scale=FLAGS.scale, num_landmarks=FLAGS.num_landmarks,
            debug=FLAGS.debug, debug_data_size=FLAGS.debug_data_size)

        print_nme_statistics(
            errors=err, model_path=model_path, network_type=net_type, test_data=FLAGS.test_data,
            max_error=FLAGS.max_error, save_log=False, log_path=FLAGS.log_path, plot_ced=False)

        model_names.append(model_name)
        model_errors.append(err)

    print_ced_compare_methods(
        method_errors=tuple(model_errors), method_names=tuple(model_names), test_data=FLAGS.test_data,
        log_path=FLAGS.log_path, save_log=FLAGS.save_log)


if __name__ == '__main__':
    tf.app.run()
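Because the script declares its configuration through tf.app.flags, every default above can be overridden on the command line; a hypothetical invocation (the paths are placeholders):

    python evaluate_and_compare_multiple_models.py \
        --models_dir=/path/to/saved_models \
        --test_data=common \
        --debug=False

Each subdirectory of models_dir must contain a *.meta checkpoint file and carry 'primary' or 'fusion' in its name, since that is how the network type is inferred.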
MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluate_model.py
CHANGED
@@ -1,54 +1,54 @@
(Again, old and new revisions are identical; the content appears once.)

from evaluation_functions import *

flags = tf.app.flags

data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
model_path = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/tests/primary/old/no_flip/basic/' \
             'tests_lr_primary_basic_no_flip/0.01/model/deep_heatmaps-80000'

# define paths
flags.DEFINE_string('img_dir', data_dir, 'data directory')
flags.DEFINE_string('test_data', 'test', 'test set to use full/common/challenging/test/art')
flags.DEFINE_string('model_path', model_path, 'model path')

# parameters used to train network
flags.DEFINE_string('network_type', 'Primary', 'network architecture Fusion/Primary')
flags.DEFINE_integer('image_size', 256, 'image size')
flags.DEFINE_integer('c_dim', 3, 'color channels')
flags.DEFINE_integer('num_landmarks', 68, 'number of face landmarks')
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt':for ground truth / 'init':for face detector output")

# choose batch size and debug data size
flags.DEFINE_integer('batch_size', 2, 'batch size')
flags.DEFINE_bool('debug', True, 'run in debug mode - use subset of the data')
flags.DEFINE_integer('debug_data_size', 4, 'subset data size to test in debug mode')

# statistics parameters
flags.DEFINE_float('max_error', 0.08, 'error threshold to be considered as failure')
flags.DEFINE_bool('save_log', True, 'save statistics to log_dir')
flags.DEFINE_string('log_path', 'logs/nme_statistics', 'directory for saving NME statistics')

FLAGS = flags.FLAGS


def main(_):

    # create directories if they do not exist
    if not tf.gfile.Exists(FLAGS.log_path):
        tf.gfile.MakeDirs(FLAGS.log_path)

    err = evaluate_heatmap_network(
        model_path=FLAGS.model_path, network_type=FLAGS.network_type, img_path=FLAGS.img_dir,
        test_data=FLAGS.test_data, batch_size=FLAGS.batch_size, image_size=FLAGS.image_size, margin=FLAGS.margin,
        bb_type=FLAGS.bb_type, c_dim=FLAGS.c_dim, scale=FLAGS.scale, num_landmarks=FLAGS.num_landmarks,
        debug=FLAGS.debug, debug_data_size=FLAGS.debug_data_size)

    print_nme_statistics(
        errors=err, model_path=FLAGS.model_path, network_type=FLAGS.network_type, test_data=FLAGS.test_data,
        max_error=FLAGS.max_error, save_log=FLAGS.save_log, log_path=FLAGS.log_path)


if __name__ == '__main__':
    tf.app.run()
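Note that model_path here is a TensorFlow checkpoint prefix rather than a single file. A minimal sketch of how such a prefix is resolved (hypothetical path and variable; tf.train.Saver locates the .index/.data-* files itself):

    import tensorflow as tf

    v = tf.get_variable('v', shape=[1])  # the graph must already contain the saved variables
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, '/path/to/model/deep_heatmaps-80000')  # prefix, no extension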
MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluate_models.py
CHANGED
@@ -1,79 +1,79 @@
(Old and new revisions identical; listed once.)

from evaluation_functions import *
from glob import glob

flags = tf.app.flags

data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
models_dir = 'tests_fusion'
pre_train_model_name = 'deep_heatmaps-50000'
datasets = ['full', 'common', 'challenging', 'test']

# define paths
flags.DEFINE_string('img_dir', data_dir, 'data directory')
flags.DEFINE_string('models_dir', models_dir, 'directory containing multiple models to evaluate')
flags.DEFINE_string('model_name', pre_train_model_name, "model name, e.g.: 'deep_heatmaps-50000'")


# parameters used to train network
flags.DEFINE_string('network_type', 'Primary', 'network architecture Fusion/Primary')
flags.DEFINE_integer('image_size', 256, 'image size')
flags.DEFINE_integer('c_dim', 3, 'color channels')
flags.DEFINE_integer('num_landmarks', 68, 'number of face landmarks')
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt':for ground truth / 'init':for face detector output")

# choose batch size and debug data size
flags.DEFINE_integer('batch_size', 2, 'batch size')
flags.DEFINE_bool('debug', False, 'run in debug mode - use subset of the data')
flags.DEFINE_integer('debug_data_size', 4, 'subset data size to test in debug mode')

# statistics parameters
flags.DEFINE_float('max_error', 0.08, 'error threshold to be considered as failure')
flags.DEFINE_bool('save_log', True, 'save statistics to log_dir')
flags.DEFINE_string('log_path', 'logs/nme_statistics', 'directory for saving NME statistics')

FLAGS = flags.FLAGS


def main(_):
    model_dirs = glob(os.path.join(FLAGS.models_dir, '*/'))

    for test_data in datasets:
        model_errors = []
        model_names = []

        for i, model_dir in enumerate(model_dirs):
            print('\n##### EVALUATING MODELS ON ' + test_data + ' set (%d/%d) #####' % (i + 1, len(model_dirs)))
            # create directories if they do not exist
            log_path = os.path.join(model_dir, 'logs/nme_statistics')
            if not os.path.exists(os.path.join(model_dir, 'logs')):
                os.mkdir(os.path.join(model_dir, 'logs'))
            if not os.path.exists(log_path):
                os.mkdir(log_path)

            model_name = model_dir.split('/')[-2]

            tf.reset_default_graph()  # reset graph

            err = evaluate_heatmap_network(
                model_path=os.path.join(model_dir, 'model', FLAGS.model_name), network_type=FLAGS.network_type,
                img_path=FLAGS.img_dir, test_data=test_data, batch_size=FLAGS.batch_size, image_size=FLAGS.image_size,
                margin=FLAGS.margin, bb_type=FLAGS.bb_type, c_dim=FLAGS.c_dim, scale=FLAGS.scale,
                num_landmarks=FLAGS.num_landmarks, debug=FLAGS.debug, debug_data_size=FLAGS.debug_data_size)

            print_nme_statistics(
                errors=err, model_path=os.path.join(model_dir, 'model', FLAGS.model_name),
                network_type=FLAGS.network_type, test_data=test_data, max_error=FLAGS.max_error,
                save_log=FLAGS.save_log, log_path=log_path, plot_ced=False)

            model_names.append(model_name)
            model_errors.append(err)

        print_ced_compare_methods(
            method_errors=tuple(model_errors), method_names=tuple(model_names), test_data=test_data,
            log_path=FLAGS.models_dir, save_log=FLAGS.save_log)


if __name__ == '__main__':
    tf.app.run()
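Unlike the single-model script, this one assumes a fixed layout under models_dir: each model directory holds a model/ subfolder containing a checkpoint of the shared name, and the script creates logs/nme_statistics alongside it. A hypothetical layout matching the defaults:

    tests_fusion/
        lr_0.01/
            model/deep_heatmaps-50000.index
            model/deep_heatmaps-50000.meta
            model/deep_heatmaps-50000.data-00000-of-00001
            logs/nme_statistics/    (created on first run)
        lr_0.001/
            model/deep_heatmaps-50000.*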
MakeItTalk/thirdparty/face_of_art/old/eval_scripts/evaluation_functions.py
CHANGED
@@ -1,300 +1,300 @@
(Old and new revisions identical; listed once.)

import tensorflow as tf
from menpofit.visualize import plot_cumulative_error_distribution
from menpofit.error import compute_cumulative_error
from scipy.integrate import simps
from menpo_functions import load_menpo_image_list, load_bb_dictionary
from logging_functions import *
from data_loading_functions import *
from time import time
import sys
from PyQt5 import QtWidgets
qapp = QtWidgets.QApplication([''])


def load_menpo_test_list(img_dir, test_data='full', image_size=256, margin=0.25, bb_type='gt'):
    mode = 'TEST'
    bb_dir = os.path.join(img_dir, 'Bounding_Boxes')
    bb_dictionary = load_bb_dictionary(bb_dir, mode, test_data=test_data)
    img_menpo_list = load_menpo_image_list(
        img_dir=img_dir, train_crop_dir=None, img_dir_ns=None, mode=mode, bb_dictionary=bb_dictionary,
        image_size=image_size, margin=margin,
        bb_type=bb_type, test_data=test_data, augment_basic=False, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0)
    return img_menpo_list


def evaluate_heatmap_fusion_network(model_path, img_path, test_data, batch_size=10, image_size=256, margin=0.25,
                                    bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False,
                                    debug_data_size=20):
    t = time()
    from deep_heatmaps_model_fusion_net import DeepHeatmapsModel
    import logging
    logging.getLogger('tensorflow').disabled = True

    # load test image menpo list

    test_menpo_img_list = load_menpo_test_list(
        img_path, test_data=test_data, image_size=image_size, margin=margin, bb_type=bb_type)

    if debug:
        test_menpo_img_list = test_menpo_img_list[:debug_data_size]
        print('\n*** FUSION NETWORK: calculating normalized mean error on: ' + test_data +
              ' set (%d images - debug mode) ***' % debug_data_size)
    else:
        print('\n*** FUSION NETWORK: calculating normalized mean error on: ' + test_data + ' set (%d images) ***' %
              (len(test_menpo_img_list)))

    # create heatmap model

    tf.reset_default_graph()

    model = DeepHeatmapsModel(mode='TEST', batch_size=batch_size, image_size=image_size, c_dim=c_dim,
                              num_landmarks=num_landmarks, img_path=img_path, test_model_path=model_path,
                              test_data=test_data, menpo_verbose=False)

    # add placeholders
    model.add_placeholders()
    # build model
    model.build_model()
    # create loss ops
    model.create_loss_ops()

    num_batches = int(1. * len(test_menpo_img_list) / batch_size)
    if num_batches == 0:
        batch_size = len(test_menpo_img_list)
        num_batches = 1

    reminder = len(test_menpo_img_list) - num_batches * batch_size  # leftover images that do not fill a batch
    num_batches_reminder = num_batches + 1 * (reminder > 0)
    img_inds = np.arange(len(test_menpo_img_list))

    with tf.Session() as session:

        # load trained parameters
        saver = tf.train.Saver()
        saver.restore(session, model_path)

        print('\nnum batches: ' + str(num_batches_reminder))

        err = []
        for j in range(num_batches):
            print('batch %d / %d ...' % (j + 1, num_batches_reminder))
            batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=batch_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            batch_maps_pred = session.run(model.pred_hm_f, {model.images: batch_images})

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_pred, batch_size=batch_size, image_size=image_size, num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms: batch_landmarks_gt, model.pred_lms: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

        if reminder > 0:
            print('batch %d / %d ...' % (j + 2, num_batches_reminder))
            reminder_inds = img_inds[-reminder:]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=reminder_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            batch_maps_pred = session.run(model.pred_hm_f, {model.images: batch_images})

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_pred, batch_size=reminder, image_size=image_size, num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms: batch_landmarks_gt, model.pred_lms: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

    print('\ndone!')
    print('run time: ' + str(time() - t))

    return err


def evaluate_heatmap_primary_network(model_path, img_path, test_data, batch_size=10, image_size=256, margin=0.25,
                                     bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False,
                                     debug_data_size=20):
    t = time()
    from deep_heatmaps_model_primary_net import DeepHeatmapsModel
    import logging
    logging.getLogger('tensorflow').disabled = True

    # load test image menpo list

    test_menpo_img_list = load_menpo_test_list(
        img_path, test_data=test_data, image_size=image_size, margin=margin, bb_type=bb_type)

    if debug:
        test_menpo_img_list = test_menpo_img_list[:debug_data_size]
        print('\n*** PRIMARY NETWORK: calculating normalized mean error on: ' + test_data +
              ' set (%d images - debug mode) ***' % debug_data_size)
    else:
        print('\n*** PRIMARY NETWORK: calculating normalized mean error on: ' + test_data +
              ' set (%d images) ***' % (len(test_menpo_img_list)))

    # create heatmap model

    tf.reset_default_graph()

    model = DeepHeatmapsModel(mode='TEST', batch_size=batch_size, image_size=image_size, c_dim=c_dim,
                              num_landmarks=num_landmarks, img_path=img_path, test_model_path=model_path,
                              test_data=test_data, menpo_verbose=False)

    # add placeholders
    model.add_placeholders()
    # build model
    model.build_model()
    # create loss ops
    model.create_loss_ops()

    num_batches = int(1. * len(test_menpo_img_list) / batch_size)
    if num_batches == 0:
        batch_size = len(test_menpo_img_list)
        num_batches = 1

    reminder = len(test_menpo_img_list) - num_batches * batch_size  # leftover images that do not fill a batch
    num_batches_reminder = num_batches + 1 * (reminder > 0)
    img_inds = np.arange(len(test_menpo_img_list))

    with tf.Session() as session:

        # load trained parameters
        saver = tf.train.Saver()
        saver.restore(session, model_path)

        print('\nnum batches: ' + str(num_batches_reminder))

        err = []
        for j in range(num_batches):
            print('batch %d / %d ...' % (j + 1, num_batches_reminder))
            batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=batch_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            batch_maps_small_pred = session.run(model.pred_hm_p, {model.images: batch_images})

            batch_maps_small_pred = zoom(batch_maps_small_pred, zoom=[1, 4, 4, 1], order=1)  # x4 linear upsampling

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_small_pred, batch_size=batch_size, image_size=image_size,
                num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms_small: batch_landmarks_gt, model.pred_lms_small: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

        if reminder > 0:
            print('batch %d / %d ...' % (j + 2, num_batches_reminder))
            reminder_inds = img_inds[-reminder:]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=reminder_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            batch_maps_small_pred = session.run(model.pred_hm_p, {model.images: batch_images})

            batch_maps_small_pred = zoom(batch_maps_small_pred, zoom=[1, 4, 4, 1], order=1)  # x4 linear upsampling

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_small_pred, batch_size=reminder, image_size=image_size,
                num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms_small: batch_landmarks_gt, model.pred_lms_small: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

    print('\ndone!')
    print('run time: ' + str(time() - t))

    return err


def evaluate_heatmap_network(model_path, network_type, img_path, test_data, batch_size=10, image_size=256, margin=0.25,
                             bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False,
                             debug_data_size=20):

    if network_type.lower() == 'fusion':
        return evaluate_heatmap_fusion_network(
            model_path=model_path, img_path=img_path, test_data=test_data, batch_size=batch_size,
            image_size=image_size, margin=margin, bb_type=bb_type, c_dim=c_dim, scale=scale,
            num_landmarks=num_landmarks, debug=debug, debug_data_size=debug_data_size)
    elif network_type.lower() == 'primary':
        return evaluate_heatmap_primary_network(
            model_path=model_path, img_path=img_path, test_data=test_data, batch_size=batch_size,
            image_size=image_size, margin=margin, bb_type=bb_type, c_dim=c_dim, scale=scale,
            num_landmarks=num_landmarks, debug=debug, debug_data_size=debug_data_size)
    else:
        sys.exit('\n*** Error: please choose a valid network type: Fusion/Primary ***')


def AUC(errors, max_error, step_error=0.0001):
    x_axis = list(np.arange(0., max_error + step_error, step_error))
    ced = np.array(compute_cumulative_error(errors, x_axis))
    return simps(ced, x=x_axis) / max_error, 1. - ced[-1]


def print_nme_statistics(
        errors, model_path, network_type, test_data, max_error=0.08, log_path='', save_log=True, plot_ced=True,
        norm='interocular distance'):
    auc, failures = AUC(errors, max_error=max_error)

    print("\n****** NME statistics for " + network_type + " Network ******\n")
    print("* model path: " + model_path)
    print("* dataset: " + test_data + ' set')

    print("\n* Normalized mean error (percentage of " + norm + "): %.2f" % (100 * np.mean(errors)))
    print("\n* AUC @ %.2f: %.2f" % (max_error, 100 * auc))
    print("\n* failure rate @ %.2f: %.2f" % (max_error, 100 * failures) + '%')

    if plot_ced:
        plt.figure()
        plt.yticks(np.linspace(0, 1, 11))
        plot_cumulative_error_distribution(
            list(errors),
            legend_entries=[network_type],
            marker_style=['s'],
            marker_size=7,
            x_label='Normalised Point-to-Point Error\n(' + norm + ')\n*' + test_data + ' set*',
        )

    if save_log:
        with open(os.path.join(log_path, network_type.lower() + "_nme_statistics_on_" + test_data + "_set.txt"),
                  "wb") as f:
            f.write(b"************************************************")
            f.write(("\n****** NME statistics for " + str(network_type) + " Network ******\n").encode())
            f.write(b"************************************************")
            f.write(("\n\n* model path: " + str(model_path)).encode())
            f.write(("\n\n* dataset: " + str(test_data) + ' set').encode())
            # encode to bytes: the file is binary, and concatenating the str `norm` into a bytes literal would fail
            f.write(("\n\n* Normalized mean error (percentage of " + norm + "): %.2f"
                     % (100 * np.mean(errors))).encode())
            f.write(b"\n\n* AUC @ %.2f: %.2f" % (max_error, 100 * auc))
            f.write(("\n\n* failure rate @ %.2f: %.2f" % (max_error, 100 * failures) + '%').encode())
            if plot_ced:
                plt.savefig(os.path.join(log_path, network_type.lower() + '_nme_ced_on_' + test_data + '_set.png'),
                            bbox_inches='tight')
                plt.close()

    print('\nlog path: ' + log_path)


def print_ced_compare_methods(
        method_errors, method_names, test_data, log_path='', save_log=True, norm='interocular distance'):
    plt.yticks(np.linspace(0, 1, 11))
    plot_cumulative_error_distribution(
        [list(err) for err in list(method_errors)],
        legend_entries=list(method_names),
        marker_style=['s'],
        marker_size=7,
        x_label='Normalised Point-to-Point Error\n(' + norm + ')\n*' + test_data + ' set*'
    )
    if save_log:
        plt.savefig(os.path.join(log_path, 'nme_ced_on_' + test_data + '_set.png'), bbox_inches='tight')
        print('ced plot path: ' + os.path.join(log_path, 'nme_ced_on_' + test_data + '_set.png'))
    plt.close()
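AUC() integrates the cumulative error distribution (CED) over [0, max_error] with Simpson's rule and normalizes by max_error, so a perfect detector scores 1.0; the second return value is the failure rate, i.e. the fraction of images whose error exceeds max_error. A self-contained sketch with synthetic errors (the compute_cumulative_error stand-in below mimics the menpofit helper and is not the library code):

    import numpy as np
    from scipy.integrate import simps

    def compute_cumulative_error(errors, x_axis):
        # stand-in for menpofit.error.compute_cumulative_error:
        # fraction of errors <= x for each x on the axis
        errors = np.asarray(errors)
        return [np.mean(errors <= x) for x in x_axis]

    def AUC(errors, max_error, step_error=0.0001):
        x_axis = list(np.arange(0., max_error + step_error, step_error))
        ced = np.array(compute_cumulative_error(errors, x_axis))
        return simps(ced, x=x_axis) / max_error, 1. - ced[-1]

    errors = [0.02, 0.03, 0.05, 0.12]  # synthetic NMEs; one failure above 0.08
    auc, failures = AUC(errors, max_error=0.08)
    print('AUC@0.08: %.3f, failure rate: %.2f' % (auc, failures))  # failure rate -> 0.25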
MakeItTalk/thirdparty/face_of_art/old/image_utils.py
CHANGED
@@ -1,590 +1,590 @@
import numpy as np
import os
from scipy.io import loadmat
import cv2
from menpo.shape.pointcloud import PointCloud
from menpo.transform import ThinPlateSplines
import menpo.io as mio
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from glob import glob
from deformation_functions import *

'''********* bounding box and image loading functions *********'''


def center_margin_bb(bb, img_bounds, margin=0.25):
    bb_size = ([bb[0, 2] - bb[0, 0], bb[0, 3] - bb[0, 1]])
    margins = (np.max(bb_size) * (1 + margin) - bb_size) / 2

    bb_new = np.zeros_like(bb)
    bb_new[0, 0] = np.maximum(bb[0, 0] - margins[0], 0)
    bb_new[0, 2] = np.minimum(bb[0, 2] + margins[0], img_bounds[1])
    bb_new[0, 1] = np.maximum(bb[0, 1] - margins[1], 0)
    bb_new[0, 3] = np.minimum(bb[0, 3] + margins[1], img_bounds[0])
    return bb_new


def load_bb_files(bb_file_dirs):
    bb_files_dict = {}
    for bb_file in bb_file_dirs:
        bb_mat = loadmat(bb_file)['bounding_boxes']
        num_imgs = np.max(bb_mat.shape)
        for i in range(num_imgs):
            name = bb_mat[0][i][0][0][0][0]
            bb_init = bb_mat[0][i][0][0][1] - 1  # matlab indices
            bb_gt = bb_mat[0][i][0][0][2] - 1  # matlab indices
            if str(name) in bb_files_dict.keys():
                print(str(name) + ' already loaded from: ' + str(bb_file))
            bb_files_dict[str(name)] = (bb_init, bb_gt)
    return bb_files_dict


def load_bb_dictionary(bb_dir, mode, test_data='full'):
    if mode == 'TRAIN':
        bb_dirs = \
            ['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
    else:
        if test_data == 'common':
            bb_dirs = \
                ['bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
        elif test_data == 'challenging':
            bb_dirs = ['bounding_boxes_ibug.mat']
        elif test_data == 'full':
            bb_dirs = \
                ['bounding_boxes_ibug.mat', 'bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
        elif test_data == 'training':
            bb_dirs = \
                ['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
        else:
            bb_dirs = None

    if mode == 'TEST' and test_data not in ['full', 'challenging', 'common', 'training']:
        bb_files_dict = None
    else:
        bb_dirs = [os.path.join(bb_dir, dataset) for dataset in bb_dirs]
        bb_files_dict = load_bb_files(bb_dirs)

    return bb_files_dict


def crop_to_face_image(img, bb_dictionary=None, gt=True, margin=0.25, image_size=256):
    name = img.path.name
    img_bounds = img.bounds()[1]

    if bb_dictionary is None:
        bb_menpo = img.landmarks['PTS'].bounding_box().points
        bb = np.array([[bb_menpo[0, 1], bb_menpo[0, 0], bb_menpo[2, 1], bb_menpo[2, 0]]])
    else:
        if gt:
            bb = bb_dictionary[name][1]  # ground truth
        else:
            bb = bb_dictionary[name][0]  # init from face detector

    bb = center_margin_bb(bb, img_bounds, margin=margin)

    bb_pointcloud = PointCloud(np.array([[bb[0, 1], bb[0, 0]],
                                         [bb[0, 3], bb[0, 0]],
                                         [bb[0, 3], bb[0, 2]],
                                         [bb[0, 1], bb[0, 2]]]))

    face_crop = img.crop_to_pointcloud(bb_pointcloud).resize([image_size, image_size])

    return face_crop


def augment_face_image(img, image_size=256, crop_size=248, angle_range=30, flip=True):

    # taken from MDM
    jaw_indices = np.arange(0, 17)
    lbrow_indices = np.arange(17, 22)
    rbrow_indices = np.arange(22, 27)
    upper_nose_indices = np.arange(27, 31)
    lower_nose_indices = np.arange(31, 36)
    leye_indices = np.arange(36, 42)
    reye_indices = np.arange(42, 48)
    outer_mouth_indices = np.arange(48, 60)
    inner_mouth_indices = np.arange(60, 68)

    mirrored_parts_68 = np.hstack([
        jaw_indices[::-1], rbrow_indices[::-1], lbrow_indices[::-1],
        upper_nose_indices, lower_nose_indices[::-1],
        np.roll(reye_indices[::-1], 4), np.roll(leye_indices[::-1], 4),
        np.roll(outer_mouth_indices[::-1], 7),
        np.roll(inner_mouth_indices[::-1], 5)
    ])

    def mirror_landmarks_68(lms, im_size):
        return PointCloud(abs(np.array([0, im_size[1]]) - lms.as_vector(
        ).reshape(-1, 2))[mirrored_parts_68])

    def mirror_image(im):
        im = im.copy()
        im.pixels = im.pixels[..., ::-1].copy()

        for group in im.landmarks:
            lms = im.landmarks[group]
            if lms.points.shape[0] == 68:
                im.landmarks[group] = mirror_landmarks_68(lms, im.shape)

        return im

    lim = image_size - crop_size
    min_crop_inds = np.random.randint(0, lim, 2)
    max_crop_inds = min_crop_inds + crop_size
    flip_rand = np.random.random() > 0.5
    rot_angle = 2 * angle_range * np.random.random_sample() - angle_range

    if flip and flip_rand:
        rand_crop = img.crop(min_crop_inds, max_crop_inds)
        rand_crop = mirror_image(rand_crop)
        rand_crop = rand_crop.rotate_ccw_about_centre(rot_angle).resize([image_size, image_size])

    else:
        rand_crop = img.crop(min_crop_inds, max_crop_inds). \
            rotate_ccw_about_centre(rot_angle).resize([image_size, image_size])

    return rand_crop


def load_menpo_image_list(img_dir, mode, bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt',
                          test_data='full', augment=True):
    def crop_to_face_image_gt(img, bb_dictionary=bb_dictionary, margin=margin, image_size=image_size):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img, bb_dictionary=bb_dictionary, margin=margin, image_size=image_size):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    if mode == 'TRAIN':
        img_set_dir = os.path.join(img_dir, 'training_set')

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')

    image_menpo_list = mio.import_images(img_set_dir, verbose=True)

    if bb_type == 'gt':
        face_crop_image_list = image_menpo_list.map(crop_to_face_image_gt)
    else:
        face_crop_image_list = image_menpo_list.map(crop_to_face_image_init)

    if mode == 'TRAIN' and augment:
        out_image_list = face_crop_image_list.map(augment_face_image)
    else:
        out_image_list = face_crop_image_list

    return out_image_list


def augment_menpo_img_ns(img, img_dir_ns, p_ns=0):
    img = img.copy()
    texture_aug = p_ns > 0.5
    if texture_aug:
        ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '*'))
        num_augs = len(ns_augs)
        if num_augs > 1:
            ns_ind = np.random.randint(1, num_augs)
            ns_aug = mio.import_image(ns_augs[ns_ind])
            ns_pixels = ns_aug.pixels
            img.pixels = ns_pixels
    return img


def augment_menpo_img_geom(img, p_geom=0):
    img = img.copy()
    if p_geom > 0.5:
        lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(), p_scale=p_geom, p_shift=p_geom)
        img = warp_face_image_tps(img, PointCloud(lms_geom_warp))
    return img


def warp_face_image_tps(img, new_shape):
    tps = ThinPlateSplines(new_shape, img.landmarks['PTS'])
    img_warp = img.warp_to_shape(img.shape, tps)
    img_warp.landmarks['PTS'] = new_shape
    return img_warp


def load_menpo_image_list_artistic_aug(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() <= p_texture))

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() <= p_geom))

    if mode == 'TRAIN':
        img_set_dir = os.path.join(img_dir, train_crop_dir)
        out_image_list = mio.import_images(img_set_dir, verbose=True)

        if augment_texture:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        out_image_list = mio.import_images(img_set_dir, verbose=True)
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)

    return out_image_list


def reload_img_menpo_list_artistic_aug_train(
        img_dir, train_crop_dir, img_dir_ns, mode, train_inds, image_size=256,
|
250 |
-
augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0):
|
251 |
-
|
252 |
-
img_menpo_list = load_menpo_image_list_artistic_aug(
|
253 |
-
img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode,image_size=image_size,
|
254 |
-
augment_basic=augment_basic, augment_texture=augment_texture, p_texture=p_texture, augment_geom=augment_geom,
|
255 |
-
p_geom=p_geom)
|
256 |
-
|
257 |
-
img_menpo_list_train = img_menpo_list[train_inds]
|
258 |
-
|
259 |
-
return img_menpo_list_train
|
260 |
-
|
261 |
-
|
262 |
-
'''********* heat-maps and image loading functions *********'''
|
263 |
-
|
264 |
-
|
265 |
-
# look for: ECT-FaceAlignment/caffe/src/caffe/layers/data_heatmap.cpp
|
266 |
-
def gaussian(x, y, x0, y0, sigma=6):
|
267 |
-
return 1./(np.sqrt(2*np.pi)*sigma) * np.exp(-0.5 * ((x-x0)**2 + (y-y0)**2) / sigma**2)
|
268 |
-
|
269 |
-
|
270 |
-
def create_heat_maps(landmarks, num_landmarks=68, image_size=256, sigma=6):
|
271 |
-
|
272 |
-
x, y = np.mgrid[0:image_size, 0:image_size]
|
273 |
-
|
274 |
-
maps = np.zeros((image_size, image_size, num_landmarks))
|
275 |
-
|
276 |
-
for i in range(num_landmarks):
|
277 |
-
out = gaussian(x, y, landmarks[i,0], landmarks[i,1], sigma=sigma)
|
278 |
-
maps[:, :, i] = (8./3)*sigma*out # copied from ECT
|
279 |
-
|
280 |
-
return maps
|
281 |
-
|
282 |
-
|
283 |
-
def load_data(img_list, batch_inds, image_size=256, c_dim=3, num_landmarks=68 , sigma=6, scale='255',
|
284 |
-
save_landmarks=False, primary=False):
|
285 |
-
|
286 |
-
num_inputs = len(batch_inds)
|
287 |
-
batch_menpo_images = img_list[batch_inds]
|
288 |
-
|
289 |
-
images = np.zeros([num_inputs, image_size, image_size, c_dim]).astype('float32')
|
290 |
-
maps_small = np.zeros([num_inputs, image_size/4, image_size/4, num_landmarks]).astype('float32')
|
291 |
-
|
292 |
-
if primary:
|
293 |
-
maps = None
|
294 |
-
else:
|
295 |
-
maps = np.zeros([num_inputs, image_size, image_size, num_landmarks]).astype('float32')
|
296 |
-
|
297 |
-
if save_landmarks:
|
298 |
-
landmarks = np.zeros([num_inputs, num_landmarks, 2]).astype('float32')
|
299 |
-
else:
|
300 |
-
landmarks = None
|
301 |
-
|
302 |
-
for ind, img in enumerate(batch_menpo_images):
|
303 |
-
|
304 |
-
images[ind, :, :, :] = np.rollaxis(img.pixels, 0, 3)
|
305 |
-
|
306 |
-
if primary:
|
307 |
-
lms = img.resize([image_size/4,image_size/4]).landmarks['PTS'].points
|
308 |
-
maps_small[ind, :, :, :] = create_heat_maps(lms, num_landmarks, image_size/4, sigma)
|
309 |
-
else:
|
310 |
-
lms = img.landmarks['PTS'].points
|
311 |
-
maps[ind, :, :, :] = create_heat_maps(lms, num_landmarks, image_size, sigma)
|
312 |
-
maps_small[ind, :, :, :]=zoom(maps[ind, :, :, :],(0.25,0.25,1))
|
313 |
-
|
314 |
-
if save_landmarks:
|
315 |
-
landmarks[ind, :, :] = lms
|
316 |
-
|
317 |
-
if scale is '255':
|
318 |
-
images *= 255 # SAME AS ECT?
|
319 |
-
elif scale is '0':
|
320 |
-
images = 2 * images - 1
|
321 |
-
|
322 |
-
return images, maps, maps_small, landmarks
|
323 |
-
|
324 |
-
|
325 |
-
def heat_maps_to_image(maps, landmarks=None, image_size=256, num_landmarks=68):
|
326 |
-
|
327 |
-
if landmarks is None:
|
328 |
-
landmarks = heat_maps_to_landmarks(maps, image_size=image_size, num_landmarks=num_landmarks)
|
329 |
-
|
330 |
-
x, y = np.mgrid[0:image_size, 0:image_size]
|
331 |
-
|
332 |
-
pixel_dist = np.sqrt(
|
333 |
-
np.square(np.expand_dims(x, 2) - landmarks[:, 0]) + np.square(np.expand_dims(y, 2) - landmarks[:, 1]))
|
334 |
-
|
335 |
-
nn_landmark = np.argmin(pixel_dist, 2)
|
336 |
-
|
337 |
-
map_image = maps[x, y, nn_landmark]
|
338 |
-
map_image = (map_image-map_image.min())/(map_image.max()-map_image.min()) # normalize for visualization
|
339 |
-
|
340 |
-
return map_image
|
341 |
-
|
342 |
-
|
343 |
-
def heat_maps_to_landmarks(maps, image_size=256, num_landmarks=68):
|
344 |
-
|
345 |
-
landmarks = np.zeros((num_landmarks,2)).astype('float32')
|
346 |
-
|
347 |
-
for m_ind in range(num_landmarks):
|
348 |
-
landmarks[m_ind, :] = np.unravel_index(maps[:, :, m_ind].argmax(), (image_size, image_size))
|
349 |
-
|
350 |
-
return landmarks
|
351 |
-
|
352 |
-
|
353 |
-
def batch_heat_maps_to_landmarks(batch_maps, batch_size, image_size=256, num_landmarks=68):
|
354 |
-
batch_landmarks = np.zeros((batch_size,num_landmarks, 2)).astype('float32')
|
355 |
-
for i in range(batch_size):
|
356 |
-
batch_landmarks[i,:,:]=heat_maps_to_landmarks(
|
357 |
-
batch_maps[i,:,:,:], image_size=image_size, num_landmarks=num_landmarks)
|
358 |
-
|
359 |
-
return batch_landmarks
|
360 |
-
|
361 |
-
|
362 |
-
def print_training_params_to_file(init_locals):
|
363 |
-
del init_locals['self']
|
364 |
-
with open(os.path.join(init_locals['save_log_path'], 'Training_Parameters.txt'), 'w') as f:
|
365 |
-
f.write('Training Parameters:\n\n')
|
366 |
-
for key, value in init_locals.items():
|
367 |
-
f.write('* %s: %s\n' % (key, value))
|
368 |
-
|
369 |
-
|
370 |
-
def create_img_with_landmarks(image, landmarks, image_size=256, num_landmarks=68, scale='255', circle_size=2):
|
371 |
-
image = image.reshape(image_size, image_size, -1)
|
372 |
-
|
373 |
-
if scale is '0':
|
374 |
-
image = 127.5 * (image + 1)
|
375 |
-
elif scale is '1':
|
376 |
-
image *= 255
|
377 |
-
|
378 |
-
landmarks = landmarks.reshape(num_landmarks, 2)
|
379 |
-
landmarks = np.clip(landmarks, 0, image_size)
|
380 |
-
|
381 |
-
for (y, x) in landmarks.astype('int'):
|
382 |
-
cv2.circle(image, (x, y), circle_size, (255, 0, 0), -1)
|
383 |
-
|
384 |
-
return image
|
385 |
-
|
386 |
-
|
387 |
-
def merge_images_landmarks_maps(images, maps, image_size=256, num_landmarks=68, num_samples=9, scale='255',
|
388 |
-
circle_size=2):
|
389 |
-
images = images[:num_samples]
|
390 |
-
if maps.shape[1] is not image_size:
|
391 |
-
images = zoom(images, (1, 0.25, 0.25, 1))
|
392 |
-
image_size /= 4
|
393 |
-
cmap = plt.get_cmap('jet')
|
394 |
-
|
395 |
-
row = int(np.sqrt(num_samples))
|
396 |
-
merged = np.zeros([row * image_size, row * image_size * 2, 3])
|
397 |
-
|
398 |
-
for idx, img in enumerate(images):
|
399 |
-
i = idx // row
|
400 |
-
j = idx % row
|
401 |
-
|
402 |
-
img_lamdmarks = heat_maps_to_landmarks(maps[idx, :, :, :], image_size=image_size, num_landmarks=num_landmarks)
|
403 |
-
map_image = heat_maps_to_image(maps[idx, :, :, :], img_lamdmarks, image_size=image_size,
|
404 |
-
num_landmarks=num_landmarks)
|
405 |
-
|
406 |
-
rgba_map_image = cmap(map_image)
|
407 |
-
map_image = np.delete(rgba_map_image, 3, 2) * 255
|
408 |
-
|
409 |
-
img = create_img_with_landmarks(img, img_lamdmarks, image_size, num_landmarks, scale=scale,
|
410 |
-
circle_size=circle_size)
|
411 |
-
|
412 |
-
merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = img
|
413 |
-
merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = map_image
|
414 |
-
|
415 |
-
return merged
|
416 |
-
|
417 |
-
|
418 |
-
def merge_compare_maps(maps_small, maps, image_size=64, num_landmarks=68, num_samples=9):
|
419 |
-
|
420 |
-
maps_small = maps_small[:num_samples]
|
421 |
-
maps = maps[:num_samples]
|
422 |
-
|
423 |
-
if maps_small.shape[1] is not image_size:
|
424 |
-
image_size = maps_small.shape[1]
|
425 |
-
|
426 |
-
if maps.shape[1] is not maps_small.shape[1]:
|
427 |
-
maps_rescale = zoom(maps, (1, 0.25, 0.25, 1))
|
428 |
-
else:
|
429 |
-
maps_rescale = maps
|
430 |
-
|
431 |
-
cmap = plt.get_cmap('jet')
|
432 |
-
|
433 |
-
row = int(np.sqrt(num_samples))
|
434 |
-
merged = np.zeros([row * image_size, row * image_size * 2, 3])
|
435 |
-
|
436 |
-
for idx, map_small in enumerate(maps_small):
|
437 |
-
i = idx // row
|
438 |
-
j = idx % row
|
439 |
-
|
440 |
-
map_image_small = heat_maps_to_image(map_small, image_size=image_size, num_landmarks=num_landmarks)
|
441 |
-
map_image = heat_maps_to_image(maps_rescale[idx, :, :, :], image_size=image_size, num_landmarks=num_landmarks)
|
442 |
-
|
443 |
-
rgba_map_image = cmap(map_image)
|
444 |
-
map_image = np.delete(rgba_map_image, 3, 2) * 255
|
445 |
-
|
446 |
-
rgba_map_image_small = cmap(map_image_small)
|
447 |
-
map_image_small = np.delete(rgba_map_image_small, 3, 2) * 255
|
448 |
-
|
449 |
-
merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = map_image_small
|
450 |
-
merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = map_image
|
451 |
-
|
452 |
-
return merged
|
453 |
-
|
454 |
-
|
455 |
-
def normalize_map(map_in):
|
456 |
-
return (map_in - map_in.min()) / (map_in.max() - map_in.min())
|
457 |
-
|
458 |
-
|
459 |
-
def map_to_rgb(map_gray):
|
460 |
-
cmap = plt.get_cmap('jet')
|
461 |
-
rgba_map_image = cmap(map_gray)
|
462 |
-
map_rgb = np.delete(rgba_map_image, 3, 2) * 255
|
463 |
-
return map_rgb
|
464 |
-
|
465 |
-
|
466 |
-
def load_art_data(img_list, batch_inds, image_size=256, c_dim=3, scale='255'):
|
467 |
-
|
468 |
-
num_inputs = len(batch_inds)
|
469 |
-
batch_menpo_images = img_list[batch_inds]
|
470 |
-
|
471 |
-
images = np.zeros([num_inputs, image_size, image_size, c_dim]).astype('float32')
|
472 |
-
|
473 |
-
for ind, img in enumerate(batch_menpo_images):
|
474 |
-
images[ind, :, :, :] = np.rollaxis(img.pixels, 0, 3)
|
475 |
-
|
476 |
-
if scale is '255':
|
477 |
-
images *= 255 # SAME AS ECT?
|
478 |
-
elif scale is '0':
|
479 |
-
images = 2 * images - 1
|
480 |
-
|
481 |
-
return images
|
482 |
-
|
483 |
-
|
484 |
-
def merge_images_landmarks_maps_gt(images, maps, maps_gt, image_size=256, num_landmarks=68, num_samples=9, scale='255',
|
485 |
-
circle_size=2, test_data='full', fast=False):
|
486 |
-
images = images[:num_samples]
|
487 |
-
if maps.shape[1] is not image_size:
|
488 |
-
images = zoom(images, (1, 0.25, 0.25, 1))
|
489 |
-
image_size /= 4
|
490 |
-
if maps_gt.shape[1] is not image_size:
|
491 |
-
maps_gt = zoom(maps_gt, (1, 0.25, 0.25, 1))
|
492 |
-
|
493 |
-
cmap = plt.get_cmap('jet')
|
494 |
-
|
495 |
-
row = int(np.sqrt(num_samples))
|
496 |
-
merged = np.zeros([row * image_size, row * image_size * 3, 3])
|
497 |
-
|
498 |
-
if fast:
|
499 |
-
maps_gt_images = np.amax(maps_gt, 3)
|
500 |
-
maps_images = np.amax(maps, 3)
|
501 |
-
|
502 |
-
for idx, img in enumerate(images):
|
503 |
-
i = idx // row
|
504 |
-
j = idx % row
|
505 |
-
|
506 |
-
img_landmarks = heat_maps_to_landmarks(maps[idx, :, :, :], image_size=image_size, num_landmarks=num_landmarks)
|
507 |
-
|
508 |
-
if fast:
|
509 |
-
map_image = maps_images[idx]
|
510 |
-
else:
|
511 |
-
map_image = heat_maps_to_image(maps[idx, :, :, :], img_landmarks, image_size=image_size,
|
512 |
-
num_landmarks=num_landmarks)
|
513 |
-
rgba_map_image = cmap(map_image)
|
514 |
-
map_image = np.delete(rgba_map_image, 3, 2) * 255
|
515 |
-
|
516 |
-
if test_data not in ['full', 'challenging', 'common', 'training']:
|
517 |
-
map_gt_image = map_image.copy()
|
518 |
-
else:
|
519 |
-
if fast:
|
520 |
-
map_gt_image = maps_gt_images[idx]
|
521 |
-
else:
|
522 |
-
map_gt_image = heat_maps_to_image(maps_gt[idx, :, :, :], image_size=image_size, num_landmarks=num_landmarks)
|
523 |
-
rgba_map_gt_image = cmap(map_gt_image)
|
524 |
-
map_gt_image = np.delete(rgba_map_gt_image, 3, 2) * 255
|
525 |
-
|
526 |
-
img = create_img_with_landmarks(img, img_landmarks, image_size, num_landmarks, scale=scale,
|
527 |
-
circle_size=circle_size)
|
528 |
-
|
529 |
-
merged[i * image_size:(i + 1) * image_size, (j * 3) * image_size:(j * 3 + 1) * image_size, :] = img
|
530 |
-
merged[i * image_size:(i + 1) * image_size, (j * 3 + 1) * image_size:(j * 3 + 2) * image_size, :] = map_image
|
531 |
-
merged[i * image_size:(i + 1) * image_size, (j * 3 + 2) * image_size:(j * 3 + 3) * image_size, :] = map_gt_image
|
532 |
-
|
533 |
-
return merged
|
534 |
-
|
535 |
-
|
536 |
-
def map_comapre_channels(images,maps1, maps2, image_size=64, num_landmarks=68, scale='255',test_data='full'):
|
537 |
-
map1 = maps1[0]
|
538 |
-
map2 = maps2[0]
|
539 |
-
image = images[0]
|
540 |
-
|
541 |
-
if image.shape[0] is not image_size:
|
542 |
-
image = zoom(image, (0.25, 0.25, 1))
|
543 |
-
if scale is '1':
|
544 |
-
image *= 255
|
545 |
-
elif scale is '0':
|
546 |
-
image = 127.5 * (image + 1)
|
547 |
-
|
548 |
-
row = np.ceil(np.sqrt(num_landmarks)).astype(np.int64)
|
549 |
-
merged = np.zeros([row * image_size, row * image_size * 2, 3])
|
550 |
-
|
551 |
-
for idx in range(num_landmarks):
|
552 |
-
i = idx // row
|
553 |
-
j = idx % row
|
554 |
-
channel_map = map_to_rgb(normalize_map(map1[:, :, idx]))
|
555 |
-
if test_data not in ['full', 'challenging', 'common', 'training']:
|
556 |
-
channel_map2=channel_map.copy()
|
557 |
-
else:
|
558 |
-
channel_map2 = map_to_rgb(normalize_map(map2[:, :, idx]))
|
559 |
-
|
560 |
-
merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = channel_map
|
561 |
-
merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = channel_map2
|
562 |
-
|
563 |
-
i = (idx + 1) // row
|
564 |
-
j = (idx + 1) % row
|
565 |
-
merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = image
|
566 |
-
|
567 |
-
return merged
|
568 |
-
|
569 |
-
|
570 |
-
def train_val_shuffle_inds_per_epoch(valid_inds, train_inds, train_iter, batch_size, log_path, save_log=True):
|
571 |
-
np.random.seed(0)
|
572 |
-
num_train_images = len(train_inds)
|
573 |
-
num_epochs = int(np.ceil((1. * train_iter) / (1. * num_train_images / batch_size)))+1
|
574 |
-
epoch_inds_shuffle = np.zeros((num_epochs, num_train_images)).astype(int)
|
575 |
-
img_inds = np.arange(num_train_images)
|
576 |
-
for i in range(num_epochs):
|
577 |
-
np.random.shuffle(img_inds)
|
578 |
-
epoch_inds_shuffle[i, :] = img_inds
|
579 |
-
|
580 |
-
if save_log:
|
581 |
-
with open(os.path.join(log_path, "train_val_shuffle_inds.csv"), "wb") as f:
|
582 |
-
if valid_inds is not None:
|
583 |
-
f.write(b'valid inds\n')
|
584 |
-
np.savetxt(f, valid_inds.reshape(1, -1), fmt='%i', delimiter=",")
|
585 |
-
f.write(b'train inds\n')
|
586 |
-
np.savetxt(f, train_inds.reshape(1, -1), fmt='%i', delimiter=",")
|
587 |
-
f.write(b'shuffle inds\n')
|
588 |
-
np.savetxt(f, epoch_inds_shuffle, fmt='%i', delimiter=",")
|
589 |
-
|
590 |
-
return epoch_inds_shuffle
|
|
|
import numpy as np
import os
from scipy.io import loadmat
import cv2
from menpo.shape.pointcloud import PointCloud
from menpo.transform import ThinPlateSplines
import menpo.io as mio
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from glob import glob
from deformation_functions import *

'''********* bounding box and image loading functions *********'''


def center_margin_bb(bb, img_bounds, margin=0.25):
    bb_size = [bb[0, 2] - bb[0, 0], bb[0, 3] - bb[0, 1]]
    margins = (np.max(bb_size) * (1 + margin) - bb_size) / 2

    bb_new = np.zeros_like(bb)
    bb_new[0, 0] = np.maximum(bb[0, 0] - margins[0], 0)
    bb_new[0, 2] = np.minimum(bb[0, 2] + margins[0], img_bounds[1])
    bb_new[0, 1] = np.maximum(bb[0, 1] - margins[1], 0)
    bb_new[0, 3] = np.minimum(bb[0, 3] + margins[1], img_bounds[0])
    return bb_new


def load_bb_files(bb_file_dirs):
    bb_files_dict = {}
    for bb_file in bb_file_dirs:
        bb_mat = loadmat(bb_file)['bounding_boxes']
        num_imgs = np.max(bb_mat.shape)
        for i in range(num_imgs):
            name = bb_mat[0][i][0][0][0][0]
            bb_init = bb_mat[0][i][0][0][1] - 1  # matlab indices
            bb_gt = bb_mat[0][i][0][0][2] - 1  # matlab indices
            if str(name) in bb_files_dict:
                # warn on duplicate entries across bb files
                print(str(name), 'already loaded from:', bb_file)
            bb_files_dict[str(name)] = (bb_init, bb_gt)
    return bb_files_dict


def load_bb_dictionary(bb_dir, mode, test_data='full'):
    if mode == 'TRAIN':
        bb_dirs = \
            ['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
    else:
        if test_data == 'common':
            bb_dirs = \
                ['bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
        elif test_data == 'challenging':
            bb_dirs = ['bounding_boxes_ibug.mat']
        elif test_data == 'full':
            bb_dirs = \
                ['bounding_boxes_ibug.mat', 'bounding_boxes_helen_testset.mat', 'bounding_boxes_lfpw_testset.mat']
        elif test_data == 'training':
            bb_dirs = \
                ['bounding_boxes_afw.mat', 'bounding_boxes_helen_trainset.mat', 'bounding_boxes_lfpw_trainset.mat']
        else:
            bb_dirs = None

    if mode == 'TEST' and test_data not in ['full', 'challenging', 'common', 'training']:
        bb_files_dict = None
    else:
        bb_dirs = [os.path.join(bb_dir, dataset) for dataset in bb_dirs]
        bb_files_dict = load_bb_files(bb_dirs)

    return bb_files_dict


def crop_to_face_image(img, bb_dictionary=None, gt=True, margin=0.25, image_size=256):
    name = img.path.name
    img_bounds = img.bounds()[1]

    if bb_dictionary is None:
        bb_menpo = img.landmarks['PTS'].bounding_box().points
        bb = np.array([[bb_menpo[0, 1], bb_menpo[0, 0], bb_menpo[2, 1], bb_menpo[2, 0]]])
    else:
        if gt:
            bb = bb_dictionary[name][1]  # ground truth
        else:
            bb = bb_dictionary[name][0]  # init from face detector

    bb = center_margin_bb(bb, img_bounds, margin=margin)

    bb_pointcloud = PointCloud(np.array([[bb[0, 1], bb[0, 0]],
                                         [bb[0, 3], bb[0, 0]],
                                         [bb[0, 3], bb[0, 2]],
                                         [bb[0, 1], bb[0, 2]]]))

    face_crop = img.crop_to_pointcloud(bb_pointcloud).resize([image_size, image_size])

    return face_crop

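For orientation, a minimal usage sketch of the bounding-box helpers above. The dataset root and file names are hypothetical, assuming a 300-W style layout with the standard bounding-box .mat files:

# Hypothetical paths, for illustration only.
bb_dict = load_bb_dictionary('/data/300W/Bounding_Boxes', mode='TEST', test_data='full')
img = mio.import_image('/data/300W/full_set/image_018.png')
face = crop_to_face_image(img, bb_dict, gt=True, margin=0.25, image_size=256)
# face is a menpo Image resized to 256x256 around the margin-padded ground-truth box
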
def augment_face_image(img, image_size=256, crop_size=248, angle_range=30, flip=True):

    # taken from MDM
    jaw_indices = np.arange(0, 17)
    lbrow_indices = np.arange(17, 22)
    rbrow_indices = np.arange(22, 27)
    upper_nose_indices = np.arange(27, 31)
    lower_nose_indices = np.arange(31, 36)
    leye_indices = np.arange(36, 42)
    reye_indices = np.arange(42, 48)
    outer_mouth_indices = np.arange(48, 60)
    inner_mouth_indices = np.arange(60, 68)

    mirrored_parts_68 = np.hstack([
        jaw_indices[::-1], rbrow_indices[::-1], lbrow_indices[::-1],
        upper_nose_indices, lower_nose_indices[::-1],
        np.roll(reye_indices[::-1], 4), np.roll(leye_indices[::-1], 4),
        np.roll(outer_mouth_indices[::-1], 7),
        np.roll(inner_mouth_indices[::-1], 5)
    ])

    def mirror_landmarks_68(lms, im_size):
        return PointCloud(abs(np.array([0, im_size[1]]) -
                              lms.as_vector().reshape(-1, 2))[mirrored_parts_68])

    def mirror_image(im):
        im = im.copy()
        im.pixels = im.pixels[..., ::-1].copy()

        for group in im.landmarks:
            lms = im.landmarks[group]
            if lms.points.shape[0] == 68:
                im.landmarks[group] = mirror_landmarks_68(lms, im.shape)

        return im

    lim = image_size - crop_size
    min_crop_inds = np.random.randint(0, lim, 2)
    max_crop_inds = min_crop_inds + crop_size
    flip_rand = np.random.random() > 0.5
    rot_angle = 2 * angle_range * np.random.random_sample() - angle_range

    if flip and flip_rand:
        rand_crop = img.crop(min_crop_inds, max_crop_inds)
        rand_crop = mirror_image(rand_crop)
        rand_crop = rand_crop.rotate_ccw_about_centre(rot_angle).resize([image_size, image_size])
    else:
        rand_crop = img.crop(min_crop_inds, max_crop_inds).\
            rotate_ccw_about_centre(rot_angle).resize([image_size, image_size])

    return rand_crop


def load_menpo_image_list(img_dir, mode, bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt',
                          test_data='full', augment=True):
    def crop_to_face_image_gt(img, bb_dictionary=bb_dictionary, margin=margin, image_size=image_size):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img, bb_dictionary=bb_dictionary, margin=margin, image_size=image_size):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    if mode == 'TRAIN':
        img_set_dir = os.path.join(img_dir, 'training_set')
    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')

    image_menpo_list = mio.import_images(img_set_dir, verbose=True)

    if bb_type == 'gt':
        face_crop_image_list = image_menpo_list.map(crop_to_face_image_gt)
    else:
        face_crop_image_list = image_menpo_list.map(crop_to_face_image_init)

    if mode == 'TRAIN' and augment:
        out_image_list = face_crop_image_list.map(augment_face_image)
    else:
        out_image_list = face_crop_image_list

    return out_image_list

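A short sketch of how this loader might be driven for training; the data root is a placeholder:

bb_dict = load_bb_dictionary('/data/300W/Bounding_Boxes', mode='TRAIN')
train_images = load_menpo_image_list('/data/300W', mode='TRAIN', bb_dictionary=bb_dict,
                                     image_size=256, bb_type='gt', augment=True)
# train_images is a lazy menpo list; crops and augmentations run on access
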
def augment_menpo_img_ns(img, img_dir_ns, p_ns=0):
    img = img.copy()
    texture_aug = p_ns > 0.5
    if texture_aug:
        ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '*'))
        num_augs = len(ns_augs)
        if num_augs > 1:
            ns_ind = np.random.randint(1, num_augs)
            ns_aug = mio.import_image(ns_augs[ns_ind])
            ns_pixels = ns_aug.pixels
            img.pixels = ns_pixels
    return img


def augment_menpo_img_geom(img, p_geom=0):
    img = img.copy()
    if p_geom > 0.5:
        lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(),
                                                    p_scale=p_geom, p_shift=p_geom)
        img = warp_face_image_tps(img, PointCloud(lms_geom_warp))
    return img


def warp_face_image_tps(img, new_shape):
    tps = ThinPlateSplines(new_shape, img.landmarks['PTS'])
    img_warp = img.warp_to_shape(img.shape, tps)
    img_warp.landmarks['PTS'] = new_shape
    return img_warp


def load_menpo_image_list_artistic_aug(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() <= p_texture))

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() <= p_geom))

    if mode == 'TRAIN':
        img_set_dir = os.path.join(img_dir, train_crop_dir)
        out_image_list = mio.import_images(img_set_dir, verbose=True)

        if augment_texture:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)
    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        out_image_list = mio.import_images(img_set_dir, verbose=True)
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)

    return out_image_list


def reload_img_menpo_list_artistic_aug_train(
        img_dir, train_crop_dir, img_dir_ns, mode, train_inds, image_size=256,
        augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0):

    img_menpo_list = load_menpo_image_list_artistic_aug(
        img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, image_size=image_size,
        augment_basic=augment_basic, augment_texture=augment_texture, p_texture=p_texture, augment_geom=augment_geom,
        p_geom=p_geom)

    img_menpo_list_train = img_menpo_list[train_inds]

    return img_menpo_list_train

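Note how the *_rand wrappers turn p_texture and p_geom into Bernoulli draws: the indicator 1. * (np.random.rand() <= p) equals 1.0 with probability p, and the augmentation only fires when the value passed in exceeds 0.5. A minimal training-time call, with placeholder directory names:

train_list = load_menpo_image_list_artistic_aug(
    '/data/300W', 'crop_gt_margin_0.25', 'crop_gt_margin_0.25_ns', mode='TRAIN',
    augment_basic=True, augment_texture=True, p_texture=0.5,
    augment_geom=True, p_geom=0.3)
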
'''********* heat-maps and image loading functions *********'''


# look for: ECT-FaceAlignment/caffe/src/caffe/layers/data_heatmap.cpp
def gaussian(x, y, x0, y0, sigma=6):
    return 1. / (np.sqrt(2 * np.pi) * sigma) * np.exp(-0.5 * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)


def create_heat_maps(landmarks, num_landmarks=68, image_size=256, sigma=6):

    x, y = np.mgrid[0:image_size, 0:image_size]

    maps = np.zeros((image_size, image_size, num_landmarks))

    for i in range(num_landmarks):
        out = gaussian(x, y, landmarks[i, 0], landmarks[i, 1], sigma=sigma)
        maps[:, :, i] = (8. / 3) * sigma * out  # copied from ECT

    return maps

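As a sanity check on the (8./3)*sigma factor: gaussian peaks at 1/(sqrt(2*pi)*sigma) at (x0, y0), so every map peaks at 8/(3*sqrt(2*pi)) ≈ 1.064 regardless of sigma. A quick verification sketch:

lms = np.array([[128., 128.]])
maps = create_heat_maps(lms, num_landmarks=1, image_size=256, sigma=6)
print(maps.max())                     # ~1.064
print(8. / (3 * np.sqrt(2 * np.pi)))  # same constant, independent of sigma
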
def load_data(img_list, batch_inds, image_size=256, c_dim=3, num_landmarks=68, sigma=6, scale='255',
              save_landmarks=False, primary=False):

    num_inputs = len(batch_inds)
    batch_menpo_images = img_list[batch_inds]

    images = np.zeros([num_inputs, image_size, image_size, c_dim]).astype('float32')
    # integer division keeps the array shape integral under Python 3
    maps_small = np.zeros([num_inputs, image_size // 4, image_size // 4, num_landmarks]).astype('float32')

    if primary:
        maps = None
    else:
        maps = np.zeros([num_inputs, image_size, image_size, num_landmarks]).astype('float32')

    if save_landmarks:
        landmarks = np.zeros([num_inputs, num_landmarks, 2]).astype('float32')
    else:
        landmarks = None

    for ind, img in enumerate(batch_menpo_images):

        images[ind, :, :, :] = np.rollaxis(img.pixels, 0, 3)

        if primary:
            lms = img.resize([image_size // 4, image_size // 4]).landmarks['PTS'].points
            maps_small[ind, :, :, :] = create_heat_maps(lms, num_landmarks, image_size // 4, sigma)
        else:
            lms = img.landmarks['PTS'].points
            maps[ind, :, :, :] = create_heat_maps(lms, num_landmarks, image_size, sigma)
            maps_small[ind, :, :, :] = zoom(maps[ind, :, :, :], (0.25, 0.25, 1))

        if save_landmarks:
            landmarks[ind, :, :] = lms

    if scale == '255':
        images *= 255  # SAME AS ECT?
    elif scale == '0':
        images = 2 * images - 1

    return images, maps, maps_small, landmarks

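A sketch of a typical batch fetch with load_data, continuing the hypothetical train_list from the earlier example and assuming the lazy list supports array indexing, as the code above relies on:

batch_inds = np.arange(6)
images, maps, maps_small, lms = load_data(train_list, batch_inds, image_size=256,
                                          sigma=6, scale='255', save_landmarks=True)
# images: (6, 256, 256, 3), maps: (6, 256, 256, 68), maps_small: (6, 64, 64, 68)
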
def heat_maps_to_image(maps, landmarks=None, image_size=256, num_landmarks=68):

    if landmarks is None:
        landmarks = heat_maps_to_landmarks(maps, image_size=image_size, num_landmarks=num_landmarks)

    x, y = np.mgrid[0:image_size, 0:image_size]

    pixel_dist = np.sqrt(
        np.square(np.expand_dims(x, 2) - landmarks[:, 0]) + np.square(np.expand_dims(y, 2) - landmarks[:, 1]))

    nn_landmark = np.argmin(pixel_dist, 2)

    map_image = maps[x, y, nn_landmark]
    map_image = (map_image - map_image.min()) / (map_image.max() - map_image.min())  # normalize for visualization

    return map_image


def heat_maps_to_landmarks(maps, image_size=256, num_landmarks=68):

    landmarks = np.zeros((num_landmarks, 2)).astype('float32')

    for m_ind in range(num_landmarks):
        landmarks[m_ind, :] = np.unravel_index(maps[:, :, m_ind].argmax(), (image_size, image_size))

    return landmarks


def batch_heat_maps_to_landmarks(batch_maps, batch_size, image_size=256, num_landmarks=68):
    batch_landmarks = np.zeros((batch_size, num_landmarks, 2)).astype('float32')
    for i in range(batch_size):
        batch_landmarks[i, :, :] = heat_maps_to_landmarks(
            batch_maps[i, :, :, :], image_size=image_size, num_landmarks=num_landmarks)

    return batch_landmarks

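Because heat_maps_to_landmarks simply takes the per-channel argmax, maps produced by create_heat_maps should round-trip to the (rounded) input points, which makes for a cheap self-test:

lms_in = np.array([[100., 40.], [200.3, 180.7]])
maps = create_heat_maps(lms_in, num_landmarks=2, image_size=256, sigma=6)
print(heat_maps_to_landmarks(maps, image_size=256, num_landmarks=2))
# expected: approximately [[100, 40], [200, 181]]
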
def print_training_params_to_file(init_locals):
    del init_locals['self']
    with open(os.path.join(init_locals['save_log_path'], 'Training_Parameters.txt'), 'w') as f:
        f.write('Training Parameters:\n\n')
        for key, value in init_locals.items():
            f.write('* %s: %s\n' % (key, value))


def create_img_with_landmarks(image, landmarks, image_size=256, num_landmarks=68, scale='255', circle_size=2):
    image = image.reshape(image_size, image_size, -1)

    if scale == '0':
        image = 127.5 * (image + 1)
    elif scale == '1':
        image *= 255

    landmarks = landmarks.reshape(num_landmarks, 2)
    landmarks = np.clip(landmarks, 0, image_size)

    for (y, x) in landmarks.astype('int'):
        cv2.circle(image, (x, y), circle_size, (255, 0, 0), -1)

    return image

def merge_images_landmarks_maps(images, maps, image_size=256, num_landmarks=68, num_samples=9, scale='255',
                                circle_size=2):
    images = images[:num_samples]
    if maps.shape[1] != image_size:
        images = zoom(images, (1, 0.25, 0.25, 1))
        image_size //= 4
    cmap = plt.get_cmap('jet')

    row = int(np.sqrt(num_samples))
    merged = np.zeros([row * image_size, row * image_size * 2, 3])

    for idx, img in enumerate(images):
        i = idx // row
        j = idx % row

        img_landmarks = heat_maps_to_landmarks(maps[idx, :, :, :], image_size=image_size, num_landmarks=num_landmarks)
        map_image = heat_maps_to_image(maps[idx, :, :, :], img_landmarks, image_size=image_size,
                                       num_landmarks=num_landmarks)

        rgba_map_image = cmap(map_image)
        map_image = np.delete(rgba_map_image, 3, 2) * 255

        img = create_img_with_landmarks(img, img_landmarks, image_size, num_landmarks, scale=scale,
                                        circle_size=circle_size)

        merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = img
        merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = map_image

    return merged


def merge_compare_maps(maps_small, maps, image_size=64, num_landmarks=68, num_samples=9):

    maps_small = maps_small[:num_samples]
    maps = maps[:num_samples]

    if maps_small.shape[1] != image_size:
        image_size = maps_small.shape[1]

    if maps.shape[1] != maps_small.shape[1]:
        maps_rescale = zoom(maps, (1, 0.25, 0.25, 1))
    else:
        maps_rescale = maps

    cmap = plt.get_cmap('jet')

    row = int(np.sqrt(num_samples))
    merged = np.zeros([row * image_size, row * image_size * 2, 3])

    for idx, map_small in enumerate(maps_small):
        i = idx // row
        j = idx % row

        map_image_small = heat_maps_to_image(map_small, image_size=image_size, num_landmarks=num_landmarks)
        map_image = heat_maps_to_image(maps_rescale[idx, :, :, :], image_size=image_size, num_landmarks=num_landmarks)

        rgba_map_image = cmap(map_image)
        map_image = np.delete(rgba_map_image, 3, 2) * 255

        rgba_map_image_small = cmap(map_image_small)
        map_image_small = np.delete(rgba_map_image_small, 3, 2) * 255

        merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = map_image_small
        merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = map_image

    return merged


def normalize_map(map_in):
    return (map_in - map_in.min()) / (map_in.max() - map_in.min())


def map_to_rgb(map_gray):
    cmap = plt.get_cmap('jet')
    rgba_map_image = cmap(map_gray)
    map_rgb = np.delete(rgba_map_image, 3, 2) * 255
    return map_rgb


def load_art_data(img_list, batch_inds, image_size=256, c_dim=3, scale='255'):

    num_inputs = len(batch_inds)
    batch_menpo_images = img_list[batch_inds]

    images = np.zeros([num_inputs, image_size, image_size, c_dim]).astype('float32')

    for ind, img in enumerate(batch_menpo_images):
        images[ind, :, :, :] = np.rollaxis(img.pixels, 0, 3)

    if scale == '255':
        images *= 255  # SAME AS ECT?
    elif scale == '0':
        images = 2 * images - 1

    return images

def merge_images_landmarks_maps_gt(images, maps, maps_gt, image_size=256, num_landmarks=68, num_samples=9, scale='255',
                                   circle_size=2, test_data='full', fast=False):
    images = images[:num_samples]
    if maps.shape[1] != image_size:
        images = zoom(images, (1, 0.25, 0.25, 1))
        image_size //= 4
    if maps_gt.shape[1] != image_size:
        maps_gt = zoom(maps_gt, (1, 0.25, 0.25, 1))

    cmap = plt.get_cmap('jet')

    row = int(np.sqrt(num_samples))
    merged = np.zeros([row * image_size, row * image_size * 3, 3])

    if fast:
        maps_gt_images = np.amax(maps_gt, 3)
        maps_images = np.amax(maps, 3)

    for idx, img in enumerate(images):
        i = idx // row
        j = idx % row

        img_landmarks = heat_maps_to_landmarks(maps[idx, :, :, :], image_size=image_size, num_landmarks=num_landmarks)

        if fast:
            map_image = maps_images[idx]
        else:
            map_image = heat_maps_to_image(maps[idx, :, :, :], img_landmarks, image_size=image_size,
                                           num_landmarks=num_landmarks)
        rgba_map_image = cmap(map_image)
        map_image = np.delete(rgba_map_image, 3, 2) * 255

        if test_data not in ['full', 'challenging', 'common', 'training']:
            map_gt_image = map_image.copy()
        else:
            if fast:
                map_gt_image = maps_gt_images[idx]
            else:
                map_gt_image = heat_maps_to_image(maps_gt[idx, :, :, :], image_size=image_size,
                                                  num_landmarks=num_landmarks)
            rgba_map_gt_image = cmap(map_gt_image)
            map_gt_image = np.delete(rgba_map_gt_image, 3, 2) * 255

        img = create_img_with_landmarks(img, img_landmarks, image_size, num_landmarks, scale=scale,
                                        circle_size=circle_size)

        merged[i * image_size:(i + 1) * image_size, (j * 3) * image_size:(j * 3 + 1) * image_size, :] = img
        merged[i * image_size:(i + 1) * image_size, (j * 3 + 1) * image_size:(j * 3 + 2) * image_size, :] = map_image
        merged[i * image_size:(i + 1) * image_size, (j * 3 + 2) * image_size:(j * 3 + 3) * image_size, :] = map_gt_image

    return merged

def map_comapre_channels(images, maps1, maps2, image_size=64, num_landmarks=68, scale='255', test_data='full'):
    map1 = maps1[0]
    map2 = maps2[0]
    image = images[0]

    if image.shape[0] != image_size:
        image = zoom(image, (0.25, 0.25, 1))
    if scale == '1':
        image *= 255
    elif scale == '0':
        image = 127.5 * (image + 1)

    row = np.ceil(np.sqrt(num_landmarks)).astype(np.int64)
    merged = np.zeros([row * image_size, row * image_size * 2, 3])

    for idx in range(num_landmarks):
        i = idx // row
        j = idx % row
        channel_map = map_to_rgb(normalize_map(map1[:, :, idx]))
        if test_data not in ['full', 'challenging', 'common', 'training']:
            channel_map2 = channel_map.copy()
        else:
            channel_map2 = map_to_rgb(normalize_map(map2[:, :, idx]))

        merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = channel_map
        merged[i * image_size:(i + 1) * image_size, (j * 2 + 1) * image_size:(j * 2 + 2) * image_size, :] = channel_map2

    # place the input image in the grid cell after the last channel
    i = (idx + 1) // row
    j = (idx + 1) % row
    merged[i * image_size:(i + 1) * image_size, (j * 2) * image_size:(j * 2 + 1) * image_size, :] = image

    return merged

def train_val_shuffle_inds_per_epoch(valid_inds, train_inds, train_iter, batch_size, log_path, save_log=True):
    np.random.seed(0)
    num_train_images = len(train_inds)
    num_epochs = int(np.ceil((1. * train_iter) / (1. * num_train_images / batch_size))) + 1
    epoch_inds_shuffle = np.zeros((num_epochs, num_train_images)).astype(int)
    img_inds = np.arange(num_train_images)
    for i in range(num_epochs):
        np.random.shuffle(img_inds)
        epoch_inds_shuffle[i, :] = img_inds

    if save_log:
        with open(os.path.join(log_path, "train_val_shuffle_inds.csv"), "wb") as f:
            if valid_inds is not None:
                f.write(b'valid inds\n')
                np.savetxt(f, valid_inds.reshape(1, -1), fmt='%i', delimiter=",")
            f.write(b'train inds\n')
            np.savetxt(f, train_inds.reshape(1, -1), fmt='%i', delimiter=",")
            f.write(b'shuffle inds\n')
            np.savetxt(f, epoch_inds_shuffle, fmt='%i', delimiter=",")

    return epoch_inds_shuffle
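A small sketch of driving the epoch shuffling above; the sizes are illustrative, and save_log=False keeps the sketch from touching the filesystem:

train_inds = np.arange(3000)
valid_inds = np.arange(3000, 3100)
shuffle = train_val_shuffle_inds_per_epoch(valid_inds, train_inds, train_iter=100000,
                                           batch_size=6, log_path='logs', save_log=False)
print(shuffle.shape)  # (201, 3000): one shuffled index row per epoch, plus one spare
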
MakeItTalk/thirdparty/face_of_art/old/load_data_module.ipynb
CHANGED
The diff for this file is too large to render. See raw diff.

MakeItTalk/thirdparty/face_of_art/old/main.py
CHANGED
import tensorflow as tf
from deep_heatmaps_model_primary_valid import DeepHeatmapsModel

# data_dir ='/mnt/External1/Yarden/deep_face_heatmaps/data/conventional_landmark_detection_dataset/'
data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
pre_train_path = 'saved_models/0.01/model/deep_heatmaps-50000'

flags = tf.app.flags
flags.DEFINE_string('mode', 'TRAIN', "'TRAIN' or 'TEST'")
flags.DEFINE_string('save_model_path', 'model', "directory for saving the model")
flags.DEFINE_string('save_sample_path', 'sample', "directory for saving the sampled images")
flags.DEFINE_string('save_log_path', 'logs', "directory for saving the log file")
flags.DEFINE_string('img_path', data_dir, "data directory")
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-5', 'saved model to test')
flags.DEFINE_string('test_data', 'full', 'dataset to test: full/common/challenging/test/art')
flags.DEFINE_string('pre_train_path', pre_train_path, 'pretrained model path')

FLAGS = flags.FLAGS


def main(_):

    # create directories if not exist
    if not tf.gfile.Exists(FLAGS.save_model_path):
        tf.gfile.MakeDirs(FLAGS.save_model_path)
    if not tf.gfile.Exists(FLAGS.save_sample_path):
        tf.gfile.MakeDirs(FLAGS.save_sample_path)
    if not tf.gfile.Exists(FLAGS.save_log_path):
        tf.gfile.MakeDirs(FLAGS.save_log_path)

    model = DeepHeatmapsModel(mode=FLAGS.mode, train_iter=80000, learning_rate=1e-11, momentum=0.95, step=80000,
                              gamma=0.1, batch_size=4, image_size=256, c_dim=3, num_landmarks=68,
                              augment_basic=True, basic_start=1, augment_texture=True, p_texture=0.,
                              augment_geom=True, p_geom=0., artistic_start=2, artistic_step=1,
                              img_path=FLAGS.img_path, save_log_path=FLAGS.save_log_path,
                              save_sample_path=FLAGS.save_sample_path, save_model_path=FLAGS.save_model_path,
                              test_data=FLAGS.test_data, test_model_path=FLAGS.test_model_path,
                              load_pretrain=False, pre_train_path=FLAGS.pre_train_path)

    if FLAGS.mode == 'TRAIN':
        model.train()
    else:
        model.eval()


if __name__ == '__main__':
    tf.app.run()
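Given the flags defined above, a typical evaluation run might be launched as follows; the checkpoint name and data directory are placeholders, not values shipped with the repo:

python main.py --mode TEST --test_data challenging --test_model_path model/deep_heatmaps-80000 --img_path /path/to/landmark_detection_dataset/
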
MakeItTalk/thirdparty/face_of_art/old/main_fusion.py
CHANGED
import tensorflow as tf
from deep_heatmaps_model_fusion_net import DeepHeatmapsModel
import os


data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
pre_train_path = 'saved_models/0.01/model/deep_heatmaps-50000'
output_dir = os.getcwd()

flags = tf.app.flags

# mode and logging parameters
flags.DEFINE_string('mode', 'TRAIN', "'TRAIN' or 'TEST'")
flags.DEFINE_integer('print_every', 100, "print losses to screen + log every X steps")
flags.DEFINE_integer('save_every', 20000, "save model every X steps")
flags.DEFINE_integer('sample_every', 5000, "sample heatmaps + landmark predictions every X steps")
flags.DEFINE_integer('sample_grid', 4, 'number of training images in sample')
flags.DEFINE_bool('sample_to_log', True, 'samples will be saved to tensorboard log')
flags.DEFINE_integer('valid_size', 4, 'number of validation images to run')
flags.DEFINE_integer('log_valid_every', 10, 'evaluate on valid set every X epochs')
flags.DEFINE_integer('debug_data_size', 20, 'subset data size to test in debug mode')
flags.DEFINE_bool('debug', False, 'run in debug mode - use subset of the data')

# define paths
flags.DEFINE_string('output_dir', output_dir, "directory for saving models, logs and samples")
flags.DEFINE_string('save_model_path', 'model', "directory for saving the model")
flags.DEFINE_string('save_sample_path', 'sample', "directory for saving the sampled images")
flags.DEFINE_string('save_log_path', 'logs', "directory for saving the log file")
flags.DEFINE_string('img_path', data_dir, "data directory")
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-50000', "saved model to test")
flags.DEFINE_string('test_data', 'full', 'test set to use: full/common/challenging/test/art')
flags.DEFINE_string('valid_data', 'full', 'validation set to use: full/common/challenging/test/art')
flags.DEFINE_string('train_crop_dir', 'crop_gt_margin_0.25', "directory of train images cropped to bb (+margin)")
flags.DEFINE_string('img_dir_ns', 'crop_gt_margin_0.25_ns', "dir of train imgs cropped to bb + style transfer")
flags.DEFINE_string('epoch_data_dir', 'epoch_data', "directory containing pre-augmented data for each epoch")
flags.DEFINE_bool('use_epoch_data', False, "use pre-augmented data")

# pretrain parameters (for fine-tuning / resume training)
flags.DEFINE_string('pre_train_path', pre_train_path, 'pretrained model path')
flags.DEFINE_bool('load_pretrain', False, "load pretrained weight?")
flags.DEFINE_bool('load_primary_only', False, 'fine-tuning using only primary network weights')

# input data parameters
flags.DEFINE_integer('image_size', 256, "image size")
flags.DEFINE_integer('c_dim', 3, "color channels")
flags.DEFINE_integer('num_landmarks', 68, "number of face landmarks")
flags.DEFINE_float('sigma', 6, "std for heatmap generation gaussian")
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt':for ground truth / 'init':for face detector output")
flags.DEFINE_float('win_mult', 3.33335, 'gaussian filter size for approx maps: 2 * sigma * win_mult + 1')

# optimization parameters
flags.DEFINE_float('l_weight_primary', 1., 'primary loss weight')
flags.DEFINE_float('l_weight_fusion', 0., 'fusion loss weight')
flags.DEFINE_float('l_weight_upsample', 3., 'upsample loss weight')
flags.DEFINE_integer('train_iter', 100000, 'maximum training iterations')
flags.DEFINE_integer('batch_size', 6, "batch_size")
flags.DEFINE_float('learning_rate', 1e-4, "initial learning rate")
flags.DEFINE_bool('adam_optimizer', True, "use adam optimizer (if False momentum optimizer is used)")
flags.DEFINE_float('momentum', 0.95, "optimizer momentum (if adam_optimizer==False)")
flags.DEFINE_integer('step', 100000, 'step for lr decay')
flags.DEFINE_float('gamma', 0.1, 'exponential base for lr decay')
flags.DEFINE_float('reg', 1e-5, 'scalar multiplier for weight decay (0 to disable)')
flags.DEFINE_string('weight_initializer', 'xavier', 'weight initializer: random_normal / xavier')
|
66 |
+
flags.DEFINE_float('weight_initializer_std', 0.01, 'std for random_normal weight initializer')
|
67 |
+
flags.DEFINE_float('bias_initializer', 0.0, 'constant value for bias initializer')
|
68 |
+
|
69 |
+
# augmentation parameters
|
70 |
+
flags.DEFINE_bool('augment_basic', True,"use basic augmentation?")
|
71 |
+
flags.DEFINE_bool('augment_texture', False,"use artistic texture augmentation?")
|
72 |
+
flags.DEFINE_float('p_texture', 0., 'probability of artistic texture augmentation')
|
73 |
+
flags.DEFINE_bool('augment_geom', False, "use artistic geometric augmentation?")
|
74 |
+
flags.DEFINE_float('p_geom', 0., 'probability of artistic geometric augmentation')
|
75 |
+
|
76 |
+
|
77 |
+
FLAGS = flags.FLAGS
|
78 |
+
|
79 |
+
if not os.path.exists(FLAGS.output_dir):
|
80 |
+
os.mkdir(FLAGS.output_dir)
|
81 |
+
|
82 |
+
|
83 |
+
def main(_):
|
84 |
+
|
85 |
+
save_model_path = os.path.join(FLAGS.output_dir, FLAGS.save_model_path)
|
86 |
+
save_sample_path = os.path.join(FLAGS.output_dir, FLAGS.save_sample_path)
|
87 |
+
save_log_path = os.path.join(FLAGS.output_dir, FLAGS.save_log_path)
|
88 |
+
|
89 |
+
# create directories if not exist
|
90 |
+
if not os.path.exists(save_model_path):
|
91 |
+
os.mkdir(save_model_path)
|
92 |
+
if not os.path.exists(save_log_path):
|
93 |
+
os.mkdir(save_log_path)
|
94 |
+
if not os.path.exists(save_sample_path) and (not FLAGS.sample_to_log or FLAGS.mode != 'TRAIN'):
|
95 |
+
os.mkdir(save_sample_path)
|
96 |
+
|
97 |
+
model = DeepHeatmapsModel(
|
98 |
+
mode=FLAGS.mode, train_iter=FLAGS.train_iter, batch_size=FLAGS.batch_size, learning_rate=FLAGS.learning_rate,
|
99 |
+
l_weight_primary=FLAGS.l_weight_primary, l_weight_fusion=FLAGS.l_weight_fusion,
|
100 |
+
l_weight_upsample=FLAGS.l_weight_upsample, reg=FLAGS.reg, adam_optimizer=FLAGS.adam_optimizer,
|
101 |
+
momentum=FLAGS.momentum, step=FLAGS.step, gamma=FLAGS.gamma,
|
102 |
+
weight_initializer=FLAGS.weight_initializer, weight_initializer_std=FLAGS.weight_initializer_std,
|
103 |
+
bias_initializer=FLAGS.bias_initializer, image_size=FLAGS.image_size, c_dim=FLAGS.c_dim,
|
104 |
+
num_landmarks=FLAGS.num_landmarks, sigma=FLAGS.sigma, scale=FLAGS.scale, margin=FLAGS.margin,
|
105 |
+
bb_type=FLAGS.bb_type, win_mult=FLAGS.win_mult, augment_basic=FLAGS.augment_basic,
|
106 |
+
augment_texture=FLAGS.augment_texture, p_texture=FLAGS.p_texture, augment_geom=FLAGS.augment_geom,
|
107 |
+
p_geom=FLAGS.p_geom, output_dir=FLAGS.output_dir, save_model_path=save_model_path,
|
108 |
+
save_sample_path=save_sample_path, save_log_path=save_log_path, test_model_path=FLAGS.test_model_path,
|
109 |
+
pre_train_path=FLAGS.pre_train_path, load_pretrain=FLAGS.load_pretrain, load_primary_only=FLAGS.load_primary_only,
|
110 |
+
img_path=FLAGS.img_path, test_data=FLAGS.test_data, valid_data=FLAGS.valid_data, valid_size=FLAGS.valid_size,
|
111 |
+
log_valid_every=FLAGS.log_valid_every, train_crop_dir=FLAGS.train_crop_dir, img_dir_ns=FLAGS.img_dir_ns,
|
112 |
+
print_every=FLAGS.print_every, save_every=FLAGS.save_every, sample_every=FLAGS.sample_every,
|
113 |
+
sample_grid=FLAGS.sample_grid, sample_to_log=FLAGS.sample_to_log, debug_data_size=FLAGS.debug_data_size,
|
114 |
+
debug=FLAGS.debug, use_epoch_data=FLAGS.use_epoch_data, epoch_data_dir=FLAGS.epoch_data_dir)
|
115 |
+
|
116 |
+
if FLAGS.mode == 'TRAIN':
|
117 |
+
model.train()
|
118 |
+
else:
|
119 |
+
model.eval()
|
120 |
+
|
121 |
+
if __name__ == '__main__':
|
122 |
+
tf.app.run()
|
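All of these launcher scripts share the same tf.app.flags pattern: each DEFINE_* call registers a command-line flag with a default, and tf.app.run() parses sys.argv into FLAGS before dispatching to main(), so any default can be overridden at launch without editing the file. A minimal sketch of the pattern (TensorFlow 1.x assumed; the file name and flag subset here are illustrative only):

# flags_sketch.py -- illustrative only; mirrors the launcher structure above
from __future__ import print_function
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('mode', 'TRAIN', "'TRAIN' or 'TEST'")
flags.DEFINE_integer('batch_size', 6, "batch_size")
FLAGS = flags.FLAGS


def main(_):
    # FLAGS reflects any overrides given at launch, e.g.
    #   python flags_sketch.py --mode=TEST --batch_size=2
    print(FLAGS.mode, FLAGS.batch_size)


if __name__ == '__main__':
    tf.app.run()  # parses flags from sys.argv, then calls main()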
MakeItTalk/thirdparty/face_of_art/old/main_fusion_server.py
CHANGED
@@ -1,92 +1,92 @@
import tensorflow as tf
from deep_heatmaps_model_primary_fusion import DeepHeatmapsModel
import os

# data_dir ='/mnt/External1/Yarden/deep_face_heatmaps/data/conventional_landmark_detection_dataset/'
data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
pre_train_path = 'saved_models/0.01/model/deep_heatmaps-50000'
output_dir = os.getcwd()

flags = tf.app.flags

flags.DEFINE_string('mode', 'TRAIN', "'TRAIN' or 'TEST'")

# define paths
flags.DEFINE_string('save_model_path', 'model', "directory for saving the model")
flags.DEFINE_string('save_sample_path', 'sample', "directory for saving the sampled images")
flags.DEFINE_string('save_log_path', 'logs', "directory for saving the log file")
flags.DEFINE_string('img_path', data_dir, "data directory")
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-5', "saved model to test")
flags.DEFINE_string('test_data', 'full', 'test set to use full/common/challenging/test/art')

# pretrain parameters
flags.DEFINE_string('pre_train_path', pre_train_path, 'pretrained model path')
flags.DEFINE_bool('load_pretrain', False, "load pretrained weight?")
flags.DEFINE_bool('load_primary_only', True, "load primary weight only?")

flags.DEFINE_integer('image_size', 256, "image size")
flags.DEFINE_integer('c_dim', 3, "color channels")
flags.DEFINE_integer('num_landmarks', 68, "number of face landmarks")

# optimization parameters
flags.DEFINE_integer('train_iter', 100000, 'maximum training iterations')
flags.DEFINE_integer('batch_size', 10, "batch_size")
flags.DEFINE_float('learning_rate', 1e-6, "initial learning rate")
flags.DEFINE_float('momentum', 0.95, 'optimizer momentum')
flags.DEFINE_integer('step', 100000, 'step for lr decay')
flags.DEFINE_float('gamma', 0.1, 'exponential base for lr decay')

# augmentation parameters
flags.DEFINE_bool('augment_basic', True, "use basic augmentation?")
flags.DEFINE_bool('augment_texture', False, "use artistic texture augmentation?")
flags.DEFINE_bool('augment_geom', False, "use artistic geometric augmentation?")
flags.DEFINE_integer('basic_start', 0, 'min epoch to start basic augmentation')
flags.DEFINE_float('p_texture', 0., 'initial probability of artistic texture augmentation')
flags.DEFINE_float('p_geom', 0., 'initial probability of artistic geometric augmentation')
flags.DEFINE_integer('artistic_step', 10, 'increase probability of artistic augmentation every X epochs')
flags.DEFINE_integer('artistic_start', 0, 'min epoch to start artistic augmentation')


# directory of test
flags.DEFINE_string('output_dir', output_dir, "directory for saving test")

FLAGS = flags.FLAGS

if not os.path.exists(FLAGS.output_dir):
    os.mkdir(FLAGS.output_dir)


def main(_):

    save_model_path = os.path.join(FLAGS.output_dir, FLAGS.save_model_path)
    save_sample_path = os.path.join(FLAGS.output_dir, FLAGS.save_sample_path)
    save_log_path = os.path.join(FLAGS.output_dir, FLAGS.save_log_path)

    # create directories if not exist
    if not os.path.exists(save_model_path):
        os.mkdir(save_model_path)
    if not os.path.exists(save_sample_path):
        os.mkdir(save_sample_path)
    if not os.path.exists(save_log_path):
        os.mkdir(save_log_path)

    model = DeepHeatmapsModel(mode=FLAGS.mode, train_iter=FLAGS.train_iter, learning_rate=FLAGS.learning_rate,
                              momentum=FLAGS.momentum, step=FLAGS.step, gamma=FLAGS.gamma, batch_size=FLAGS.batch_size,
                              image_size=FLAGS.image_size, c_dim=FLAGS.c_dim, num_landmarks=FLAGS.num_landmarks,
                              augment_basic=FLAGS.augment_basic, basic_start=FLAGS.basic_start,
                              augment_texture=FLAGS.augment_texture, p_texture=FLAGS.p_texture,
                              augment_geom=FLAGS.augment_geom, p_geom=FLAGS.p_geom,
                              artistic_start=FLAGS.artistic_start, artistic_step=FLAGS.artistic_step,
                              img_path=FLAGS.img_path, save_log_path=save_log_path,
                              save_sample_path=save_sample_path, save_model_path=save_model_path,
                              test_data=FLAGS.test_data, test_model_path=FLAGS.test_model_path,
                              load_pretrain=FLAGS.load_pretrain, load_primary_only=FLAGS.load_primary_only,
                              pre_train_path=FLAGS.pre_train_path)

    if FLAGS.mode == 'TRAIN':
        model.train()
    else:
        model.eval()


if __name__ == '__main__':
    tf.app.run()
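The repeated exists-then-mkdir blocks in these launchers are fine for a single process, but they race if two jobs share an output_dir: a directory can appear between the check and the mkdir. A more defensive variant (a sketch, not what the repo uses; ensure_dir is a hypothetical helper):

import errno
import os


def ensure_dir(path):
    # create path if missing; tolerate a concurrent creator (works on Python 2 and 3)
    try:
        os.mkdir(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise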
MakeItTalk/thirdparty/face_of_art/old/main_primary_server.py
CHANGED
@@ -1,89 +1,89 @@
import tensorflow as tf
from deep_heatmaps_model_primary_valid import DeepHeatmapsModel
import os

# data_dir ='/mnt/External1/Yarden/deep_face_heatmaps/data/conventional_landmark_detection_dataset/'
data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
pre_train_path = 'saved_models/0.01/model/deep_heatmaps-50000'
output_dir = os.getcwd()

flags = tf.app.flags

flags.DEFINE_string('mode', 'TRAIN', "'TRAIN' or 'TEST'")

# define paths
flags.DEFINE_string('save_model_path', 'model', "directory for saving the model")
flags.DEFINE_string('save_sample_path', 'sample', "directory for saving the sampled images")
flags.DEFINE_string('save_log_path', 'logs', "directory for saving the log file")
flags.DEFINE_string('img_path', data_dir, "data directory")
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-5', "saved model to test")
flags.DEFINE_string('test_data', 'full', 'test set to use full/common/challenging/test/art')

# pretrain parameters
flags.DEFINE_string('pre_train_path', pre_train_path, 'pretrained model path')
flags.DEFINE_bool('load_pretrain', False, "load pretrained weight?")

flags.DEFINE_integer('image_size', 256, "image size")
flags.DEFINE_integer('c_dim', 3, "color channels")
flags.DEFINE_integer('num_landmarks', 68, "number of face landmarks")

# optimization parameters
flags.DEFINE_integer('train_iter', 100000, 'maximum training iterations')
flags.DEFINE_integer('batch_size', 10, "batch_size")
flags.DEFINE_float('learning_rate', 1e-6, "initial learning rate")
flags.DEFINE_float('momentum', 0.95, 'optimizer momentum')
flags.DEFINE_integer('step', 100000, 'step for lr decay')
flags.DEFINE_float('gamma', 0.1, 'exponential base for lr decay')

# augmentation parameters
flags.DEFINE_bool('augment_basic', True, "use basic augmentation?")
flags.DEFINE_bool('augment_texture', False, "use artistic texture augmentation?")
flags.DEFINE_bool('augment_geom', False, "use artistic geometric augmentation?")
flags.DEFINE_integer('basic_start', 0, 'min epoch to start basic augmentation')
flags.DEFINE_float('p_texture', 0., 'initial probability of artistic texture augmentation')
flags.DEFINE_float('p_geom', 0., 'initial probability of artistic geometric augmentation')
flags.DEFINE_integer('artistic_step', 10, 'increase probability of artistic augmentation every X epochs')
flags.DEFINE_integer('artistic_start', 0, 'min epoch to start artistic augmentation')

# directory of test
flags.DEFINE_string('output_dir', output_dir, "directory for saving test")

FLAGS = flags.FLAGS

if not os.path.exists(FLAGS.output_dir):
    os.mkdir(FLAGS.output_dir)


def main(_):

    save_model_path = os.path.join(FLAGS.output_dir, FLAGS.save_model_path)
    save_sample_path = os.path.join(FLAGS.output_dir, FLAGS.save_sample_path)
    save_log_path = os.path.join(FLAGS.output_dir, FLAGS.save_log_path)

    # create directories if not exist
    if not os.path.exists(save_model_path):
        os.mkdir(save_model_path)
    if not os.path.exists(save_sample_path):
        os.mkdir(save_sample_path)
    if not os.path.exists(save_log_path):
        os.mkdir(save_log_path)

    model = DeepHeatmapsModel(mode=FLAGS.mode, train_iter=FLAGS.train_iter, learning_rate=FLAGS.learning_rate,
                              momentum=FLAGS.momentum, step=FLAGS.step, gamma=FLAGS.gamma, batch_size=FLAGS.batch_size,
                              image_size=FLAGS.image_size, c_dim=FLAGS.c_dim, num_landmarks=FLAGS.num_landmarks,
                              augment_basic=FLAGS.augment_basic, basic_start=FLAGS.basic_start,
                              augment_texture=FLAGS.augment_texture, p_texture=FLAGS.p_texture,
                              augment_geom=FLAGS.augment_geom, p_geom=FLAGS.p_geom,
                              artistic_start=FLAGS.artistic_start, artistic_step=FLAGS.artistic_step,
                              img_path=FLAGS.img_path, save_log_path=save_log_path,
                              save_sample_path=save_sample_path, save_model_path=save_model_path,
                              test_data=FLAGS.test_data, test_model_path=FLAGS.test_model_path,
                              load_pretrain=FLAGS.load_pretrain, pre_train_path=FLAGS.pre_train_path)

    if FLAGS.mode == 'TRAIN':
        model.train()
    else:
        model.eval()


if __name__ == '__main__':
    tf.app.run()
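The step and gamma flags in these launchers suggest an exponentially decayed learning rate of the form learning_rate * gamma ** (global_step / step). How DeepHeatmapsModel wires this internally is not shown in these files; one plausible TF 1.x realization (an assumption, not the repo's code) is:

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
# with staircase=True: lr = 1e-6 * 0.1 ** floor(global_step / 100000),
# matching the learning_rate / gamma / step flag defaults above
lr = tf.train.exponential_decay(learning_rate=1e-6, global_step=global_step,
                                decay_steps=100000, decay_rate=0.1, staircase=True)
optimizer = tf.train.MomentumOptimizer(lr, momentum=0.95)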
MakeItTalk/thirdparty/face_of_art/old/run_tests_template.py
CHANGED
@@ -1,50 +1,50 @@
import tensorflow as tf
from deep_heatmaps_model_primary_valid import DeepHeatmapsModel
import os
import numpy as np

num_tests = 10
params = np.logspace(-8, -2, num_tests)
max_iter = 80000

output_dir = 'tests_lr_fusion'
data_dir = '../conventional_landmark_detection_dataset'

flags = tf.app.flags
flags.DEFINE_string('output_dir', output_dir, "directory for saving the log file")
flags.DEFINE_string('img_path', data_dir, "data directory")
FLAGS = flags.FLAGS

if not os.path.exists(FLAGS.output_dir):
    os.mkdir(FLAGS.output_dir)

for param in params:
    test_name = str(param)
    test_dir = os.path.join(FLAGS.output_dir, test_name)
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)

    print '##### RUNNING TESTS ##### current directory:', test_dir

    save_model_path = os.path.join(test_dir, 'model')
    save_sample_path = os.path.join(test_dir, 'sample')
    save_log_path = os.path.join(test_dir, 'logs')

    # create directories if not exist
    if not os.path.exists(save_model_path):
        os.mkdir(save_model_path)
    if not os.path.exists(save_sample_path):
        os.mkdir(save_sample_path)
    if not os.path.exists(save_log_path):
        os.mkdir(save_log_path)

    tf.reset_default_graph()  # reset graph

    model = DeepHeatmapsModel(mode='TRAIN', train_iter=max_iter, learning_rate=param, momentum=0.95, step=80000,
                              gamma=0.1, batch_size=4, image_size=256, c_dim=3, num_landmarks=68,
                              augment_basic=True, basic_start=0, augment_texture=True, p_texture=0.5,
                              augment_geom=True, p_geom=0.5, artistic_start=0, artistic_step=10,
                              img_path=FLAGS.img_path, save_log_path=save_log_path, save_sample_path=save_sample_path,
                              save_model_path=save_model_path)

    model.train()
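np.logspace(-8, -2, num_tests) spaces the swept learning rates evenly in log10 rather than linearly, so each test runs at roughly 4.64x the previous rate:

import numpy as np

lrs = np.logspace(-8, -2, 10)  # == 10.0 ** np.linspace(-8, -2, 10)
# array([1.00e-08, 4.64e-08, 2.15e-07, 1.00e-06, 4.64e-06,
#        2.15e-05, 1.00e-04, 4.64e-04, 2.15e-03, 1.00e-02])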
MakeItTalk/thirdparty/face_of_art/old/temp/Untitled.rtf
CHANGED
@@ -1,7 +1,7 @@
{\rtf1\ansi\ansicpg1252\cocoartf1404\cocoasubrtf470
{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
{\colortbl;\red255\green255\blue255;}
\paperw11900\paperh16840\margl1440\margr1440\vieww10800\viewh8400\viewkind0
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\pardirnatural\partightenfactor0

\f0\fs24 \cf0 a}
MakeItTalk/thirdparty/face_of_art/old/temp/create_art_data.py
CHANGED
@@ -1,132 +1,132 @@
from create_art_data_functions import *
from scipy.misc import imsave
import sys


'''THIS SCRIPT CREATES PRE-AUGMENTED DATA TO SAVE TRAINING TIME (ARTISTIC OR BASIC AUGMENTATION):
under the folder *outdir*, it will create a separate folder for each epoch. the folder will
contain the augmented images and matching landmark (pts) files.'''

# parameter for calculating number of epochs
num_train_images = 3148  # number of training images
train_iter = 100000  # number of training iterations
batch_size = 6  # batch size in training
num_epochs = int(np.ceil((1. * train_iter) / (1. * num_train_images / batch_size))) + 1

# augmentation parameters
num_augs = 9  # number of style transfer augmented images
aug_geom = True  # use artistic geometric augmentation?
aug_texture = True  # use artistic texture augmentation?

# image parameters
bb_type = 'gt'  # face bounding-box type (gt/init)
margin = 0.25  # margin for face crops - % of bb size
image_size = 256  # image size

# data-sets image paths
dataset = 'training'  # dataset to augment (training/full/common/challenging/test)
img_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
train_crop_dir = 'crop_gt_margin_0.25'  # directory of train images cropped to bb (+margin)
img_dir_ns = os.path.join(img_dir, train_crop_dir + '_ns')  # dir of train imgs cropped to bb + style transfer
outdir = '/Users/arik/Desktop/epoch_data'  # directory for saving augmented data

# other parameters
min_epoch_to_save = 0  # start saving images from this epoch (first epoch is 0)
debug_data_size = 15
debug = False
random_seed = 1234  # random seed for numpy

########################################################################################
if aug_texture and img_dir_ns is None:
    print('\n *** ERROR: aug_texture is True, and img_dir_ns is None.\n'
          'please specify path for img_dir_ns to augment image texture!')
    sys.exit()

if not os.path.exists(outdir):
    os.mkdir(outdir)

gt = (bb_type == 'gt')
bb_dir = os.path.join(img_dir, 'Bounding_Boxes')

if dataset == 'training':
    mode = 'TRAIN'
else:
    mode = 'TEST'
bb_dictionary = load_bb_dictionary(bb_dir, mode=mode, test_data=dataset)

aug_geom_dir = os.path.join(outdir, 'aug_geom')
aug_texture_dir = os.path.join(outdir, 'aug_texture')
aug_geom_texture_dir = os.path.join(outdir, 'aug_geom_texture')
aug_basic_dir = os.path.join(outdir, 'aug_basic')

if not aug_geom and aug_texture:
    save_aug_path = aug_texture_dir
elif aug_geom and not aug_texture:
    save_aug_path = aug_geom_dir
elif aug_geom and aug_texture:
    save_aug_path = aug_geom_texture_dir
else:
    save_aug_path = aug_basic_dir

print('saving augmented images: aug_geom=' + str(aug_geom) + ' aug_texture=' + str(aug_texture) +
      ' : ' + str(save_aug_path))

if not os.path.exists(save_aug_path):
    os.mkdir(save_aug_path)

np.random.seed(random_seed)
ns_inds = np.arange(num_augs)

for i in range(num_epochs):
    print('saving augmented images of epoch %d/%d' % (i, num_epochs - 1))
    if not os.path.exists(os.path.join(save_aug_path, str(i))) and i > min_epoch_to_save - 1:
        os.mkdir(os.path.join(save_aug_path, str(i)))

    if i % num_augs == 0:
        np.random.shuffle(ns_inds)

    if not aug_geom and aug_texture:
        img_list = load_menpo_image_list_no_geom(
            img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode='TRAIN',
            bb_dictionary=bb_dictionary,
            image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=True,
            augment_texture=True, p_texture=1.,
            augment_geom=True, p_geom=1., ns_ind=ns_inds[i % num_augs], dataset=dataset)
    elif aug_geom and not aug_texture:
        img_list = load_menpo_image_list_no_texture(
            img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode='TRAIN',
            bb_dictionary=bb_dictionary,
            image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=True,
            augment_texture=True, p_texture=1.,
            augment_geom=True, p_geom=1., ns_ind=ns_inds[i % num_augs], dataset=dataset)
    elif aug_geom and aug_texture:
        img_list = load_menpo_image_list(
            img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode='TRAIN',
            bb_dictionary=bb_dictionary,
            image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=True,
            augment_texture=True, p_texture=1.,
            augment_geom=True, p_geom=1., ns_ind=ns_inds[i % num_augs], dataset=dataset)
    else:
        img_list = load_menpo_image_list_no_artistic(
            img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode='TRAIN',
            bb_dictionary=bb_dictionary,
            image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=True,
            augment_texture=True, p_texture=1.,
            augment_geom=True, p_geom=1., ns_ind=ns_inds[i % num_augs], dataset=dataset)

    if debug:
        img_list = img_list[:debug_data_size]

    for im in img_list:
        im_path = os.path.join(save_aug_path, str(i), im.path.name.split('.')[0] + '.png')
        pts_path = os.path.join(save_aug_path, str(i), im.path.name.split('.')[0] + '.pts')
        if i > min_epoch_to_save - 1:
            if not os.path.exists(im_path):
                if im.pixels.shape[0] == 1:
                    im_pixels = gray2rgb(np.squeeze(im.pixels))
                else:
                    im_pixels = np.rollaxis(im.pixels, 0, 3)
                imsave(im_path, im_pixels)
            if not os.path.exists(pts_path):
                mio.export_landmark_file(im.landmarks['PTS'], pts_path, overwrite=True)
print('DONE!')
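With the defaults above, the epoch count works out as follows: 3148 images at batch size 6 give roughly 524.7 steps per epoch, so 100000 iterations span ceil(190.6) = 191 epochs, plus one extra folder:

import numpy as np

num_train_images, train_iter, batch_size = 3148, 100000, 6
steps_per_epoch = 1. * num_train_images / batch_size           # ~524.67
num_epochs = int(np.ceil(train_iter / steps_per_epoch)) + 1    # 191 + 1 = 192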
MakeItTalk/thirdparty/face_of_art/old/temp/create_art_data_functions.py
CHANGED
@@ -1,318 +1,318 @@
from menpo_functions import *
from data_loading_functions import *
from menpo.shape import bounding_box
from menpo.transform import Translation, Rotation


def augment_face_image(img, image_size=256, crop_size=248, angle_range=30, flip=True, warp_mode='constant'):
    """basic image augmentation: random crop, rotation and horizontal flip"""

    # from menpo
    def round_image_shape(shape, round):
        if round not in ['ceil', 'round', 'floor']:
            raise ValueError('round must be either ceil, round or floor')
        # Ensure that the '+' operator means concatenate tuples
        return tuple(getattr(np, round)(shape).astype(np.int))

    # taken from MDM
    def mirror_landmarks_68(lms, im_size):
        return PointCloud(abs(np.array([0, im_size[1]]) - lms.as_vector(
        ).reshape(-1, 2))[mirrored_parts_68])

    # taken from MDM
    def mirror_image(im):
        im = im.copy()
        im.pixels = im.pixels[..., ::-1].copy()

        for group in im.landmarks:
            lms = im.landmarks[group]
            if lms.points.shape[0] == 68:
                im.landmarks[group] = mirror_landmarks_68(lms, im.shape)

        return im

    flip_rand = np.random.random() > 0.5
    # rot_rand = np.random.random() > 0.5
    # crop_rand = np.random.random() > 0.5
    rot_rand = True  # like ECT
    crop_rand = True  # like ECT

    if crop_rand:
        lim = image_size - crop_size
        min_crop_inds = np.random.randint(0, lim, 2)
        max_crop_inds = min_crop_inds + crop_size
        img = img.crop(min_crop_inds, max_crop_inds)

    if flip and flip_rand:
        img = mirror_image(img)

    if rot_rand:
        rot_angle = 2 * angle_range * np.random.random_sample() - angle_range
        # img = img.rotate_ccw_about_centre(rot_angle)

        # Get image's bounding box coordinates
        bbox = bounding_box((0, 0), [img.shape[0] - 1, img.shape[1] - 1])
        # Translate to origin and rotate counter-clockwise
        trans = Translation(-img.centre(),
                            skip_checks=True).compose_before(
            Rotation.init_from_2d_ccw_angle(rot_angle, degrees=True))
        rotated_bbox = trans.apply(bbox)
        # Create new translation so that min bbox values go to 0
        t = Translation(-rotated_bbox.bounds()[0])
        trans.compose_before_inplace(t)
        rotated_bbox = trans.apply(bbox)
        # Output image's shape is the range of the rotated bounding box
        # while respecting the users rounding preference.
        shape = round_image_shape(rotated_bbox.range() + 1, 'round')

        img = img.warp_to_shape(
            shape, trans.pseudoinverse(), warp_landmarks=True, mode=warp_mode)

    img = img.resize([image_size, image_size])

    return img


def augment_menpo_img_ns(img, img_dir_ns, p_ns=0, ns_ind=None):
    """texture style image augmentation using stylized copies in *img_dir_ns*"""

    if p_ns > 0.5:
        ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '_ns*'))
        num_augs = len(ns_augs)
        if num_augs > 0:
            if ns_ind is None or ns_ind >= num_augs:
                ns_ind = np.random.randint(0, num_augs)
            ns_aug = mio.import_image(ns_augs[ns_ind])
            img.pixels = ns_aug.pixels
    return img


def augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=0, ns_ind=None):
    """texture style image augmentation using stylized copies in *img_dir_ns*"""

    if p_ns > 0.5:
        ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '_ns*'))
        num_augs = len(ns_augs)
        if num_augs > 0:
            if ns_ind is None or ns_ind >= num_augs:
                ns_ind = np.random.randint(0, num_augs)
            # ns_aug = mio.import_image(ns_augs[ns_ind])
            # ns_pixels = ns_aug.pixels
    return img


def augment_menpo_img_geom_dont_apply(img, p_geom=0):
    """geometric style image augmentation using random face deformations"""

    if p_geom > 0.5:
        lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(), p_scale=p_geom, p_shift=p_geom)
    return img


def augment_menpo_img_geom(img, p_geom=0):
    """geometric style image augmentation using random face deformations"""

    if p_geom > 0.5:
        lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(), p_scale=p_geom, p_shift=p_geom)
        img = warp_face_image_tps(img, PointCloud(lms_geom_warp))
    return img


def load_menpo_image_list(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode is 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list


def load_menpo_image_list_no_geom(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom_dont_apply(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode is 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list


def load_menpo_image_list_no_texture(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode is 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list


def load_menpo_image_list_no_artistic(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom_dont_apply(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode is 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type is 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type is 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list
|
|
1 |
+
from menpo_functions import *
|
2 |
+
from data_loading_functions import *
|
3 |
+
from menpo.shape import bounding_box
|
4 |
+
from menpo.transform import Translation, Rotation
|
5 |
+
|
6 |
+
|
7 |
+
def augment_face_image(img, image_size=256, crop_size=248, angle_range=30, flip=True, warp_mode='constant'):
|
8 |
+
"""basic image augmentation: random crop, rotation and horizontal flip"""
|
9 |
+
|
10 |
+
#from menpo
|
11 |
+
def round_image_shape(shape, round):
|
12 |
+
if round not in ['ceil', 'round', 'floor']:
|
13 |
+
raise ValueError('round must be either ceil, round or floor')
|
14 |
+
# Ensure that the '+' operator means concatenate tuples
|
15 |
+
return tuple(getattr(np, round)(shape).astype(np.int))
|
16 |
+
|
17 |
+
# taken from MDM
|
18 |
+
def mirror_landmarks_68(lms, im_size):
|
19 |
+
return PointCloud(abs(np.array([0, im_size[1]]) - lms.as_vector(
|
20 |
+
).reshape(-1, 2))[mirrored_parts_68])
|
21 |
+
|
22 |
+
# taken from MDM
|
23 |
+
def mirror_image(im):
|
24 |
+
im = im.copy()
|
25 |
+
im.pixels = im.pixels[..., ::-1].copy()
|
26 |
+
|
27 |
+
for group in im.landmarks:
|
28 |
+
lms = im.landmarks[group]
|
29 |
+
if lms.points.shape[0] == 68:
|
30 |
+
im.landmarks[group] = mirror_landmarks_68(lms, im.shape)
|
31 |
+
|
32 |
+
return im
|
33 |
+
|
34 |
+
flip_rand = np.random.random() > 0.5
|
35 |
+
# rot_rand = np.random.random() > 0.5
|
36 |
+
# crop_rand = np.random.random() > 0.5
|
37 |
+
rot_rand = True # like ECT
|
38 |
+
crop_rand = True # like ECT
|
39 |
+
|
40 |
+
if crop_rand:
|
41 |
+
lim = image_size - crop_size
|
42 |
+
min_crop_inds = np.random.randint(0, lim, 2)
|
43 |
+
max_crop_inds = min_crop_inds + crop_size
|
44 |
+
img = img.crop(min_crop_inds, max_crop_inds)
|
45 |
+
|
46 |
+
if flip and flip_rand:
|
47 |
+
img = mirror_image(img)
|
48 |
+
|
49 |
+
if rot_rand:
|
50 |
+
rot_angle = 2 * angle_range * np.random.random_sample() - angle_range
|
51 |
+
# img = img.rotate_ccw_about_centre(rot_angle)
|
52 |
+
|
53 |
+
# Get image's bounding box coordinates
|
54 |
+
bbox = bounding_box((0, 0), [img.shape[0] - 1, img.shape[1] - 1])
|
55 |
+
# Translate to origin and rotate counter-clockwise
|
56 |
+
trans = Translation(-img.centre(),
|
57 |
+
skip_checks=True).compose_before(
|
58 |
+
Rotation.init_from_2d_ccw_angle(rot_angle, degrees=True))
|
59 |
+
rotated_bbox = trans.apply(bbox)
|
60 |
+
# Create new translation so that min bbox values go to 0
|
61 |
+
t = Translation(-rotated_bbox.bounds()[0])
|
62 |
+
trans.compose_before_inplace(t)
|
63 |
+
rotated_bbox = trans.apply(bbox)
|
64 |
+
# Output image's shape is the range of the rotated bounding box
|
65 |
+
# while respecting the users rounding preference.
|
66 |
+
shape = round_image_shape(rotated_bbox.range() + 1, 'round')
|
67 |
+
|
68 |
+
img = img.warp_to_shape(
|
69 |
+
shape, trans.pseudoinverse(), warp_landmarks=True, mode=warp_mode)
|
70 |
+
|
71 |
+
img = img.resize([image_size, image_size])
|
72 |
+
|
73 |
+
return img
|
74 |
+
|
75 |
+
|
76 |
+
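
# usage sketch for the augmenter above; the image path is a hypothetical
# placeholder, not part of the original script:
#   img = mio.import_image('training_set/image_0001.png')
#   aug = augment_face_image(img)      # random 248-crop, flip, rotate
#   aug.shape == (256, 256)            # always resized back to image_size
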
def augment_menpo_img_ns(img, img_dir_ns, p_ns=0, ns_ind=None):
    """texture style image augmentation using stylized copies in *img_dir_ns*"""

    if p_ns > 0.5:
        ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '_ns*'))
        num_augs = len(ns_augs)
        if num_augs > 0:
            if ns_ind is None or ns_ind >= num_augs:
                ns_ind = np.random.randint(0, num_augs)
            ns_aug = mio.import_image(ns_augs[ns_ind])
            img.pixels = ns_aug.pixels
    return img


def augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=0, ns_ind=None):
    """no-op variant of the texture style augmentation: draws the same random
    choices as augment_menpo_img_ns but leaves the image pixels untouched"""

    if p_ns > 0.5:
        ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '_ns*'))
        num_augs = len(ns_augs)
        if num_augs > 0:
            if ns_ind is None or ns_ind >= num_augs:
                ns_ind = np.random.randint(0, num_augs)
    return img


def augment_menpo_img_geom_dont_apply(img, p_geom=0):
    """no-op variant of the geometric style augmentation: computes the deformed
    landmarks but does not warp the image"""

    if p_geom > 0.5:
        # deformation computed but intentionally not applied
        lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(), p_scale=p_geom, p_shift=p_geom)
    return img


def augment_menpo_img_geom(img, p_geom=0):
    """geometric style image augmentation using random face deformations"""

    if p_geom > 0.5:
        lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(), p_scale=p_geom, p_shift=p_geom)
        img = warp_face_image_tps(img, PointCloud(lms_geom_warp))
    return img

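# note: the loaders below collapse each augmentation probability into a hard
# per-image 0/1 switch before calling the functions above, e.g.
#   p_ns = 1. * (np.random.rand() < p_texture)   # 1.0 with probability p_texture, else 0.0
# so an augmentation fires exactly when its draw succeeds (value > 0.5)
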
def load_menpo_image_list(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode == 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list

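# usage sketch for the loader above (paths are hypothetical placeholders;
# load_bb_dictionary is defined in menpo_functions):
#   bb_dir = os.path.join('<dataset_root>', 'Bounding_Boxes')
#   bb_dictionary = load_bb_dictionary(bb_dir, mode='TEST', test_data='full')
#   img_list = load_menpo_image_list('<dataset_root>', None, None, 'TEST',
#                                    bb_dictionary=bb_dictionary, bb_type='gt')
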
def load_menpo_image_list_no_geom(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom_dont_apply(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode == 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list


def load_menpo_image_list_no_texture(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode == 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list


def load_menpo_image_list_no_artistic(
        img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25,
        bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0,
        augment_geom=False, p_geom=0, verbose=False, ns_ind=None, dataset='training'):

    def crop_to_face_image_gt(img):
        return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size)

    def crop_to_face_image_init(img):
        return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size)

    def augment_menpo_img_ns_rand(img):
        return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture), ns_ind=ns_ind)

    def augment_menpo_img_geom_rand(img):
        return augment_menpo_img_geom_dont_apply(img, p_geom=1. * (np.random.rand() < p_geom))

    if mode == 'TRAIN':
        if train_crop_dir is None:
            img_set_dir = os.path.join(img_dir, dataset + '_set')
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            img_set_dir = os.path.join(img_dir, train_crop_dir)
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

        if augment_texture and img_dir_ns is not None:
            out_image_list = out_image_list.map(augment_menpo_img_ns_rand)
        if augment_geom:
            out_image_list = out_image_list.map(augment_menpo_img_geom_rand)
        if augment_basic:
            out_image_list = out_image_list.map(augment_face_image)

    else:
        img_set_dir = os.path.join(img_dir, test_data + '_set')
        if test_data in ['full', 'challenging', 'common', 'training', 'test']:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False)
            if bb_type == 'gt':
                out_image_list = out_image_list.map(crop_to_face_image_gt)
            elif bb_type == 'init':
                out_image_list = out_image_list.map(crop_to_face_image_init)
        else:
            out_image_list = mio.import_images(img_set_dir, verbose=verbose)

    return out_image_list
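
The four loaders share one body and differ only in which augmentation wrappers they map over the image list, apparently so that ablation runs consume an identical sequence of random draws:

# load_menpo_image_list             -> augment_menpo_img_ns            + augment_menpo_img_geom
# load_menpo_image_list_no_geom     -> augment_menpo_img_ns            + augment_menpo_img_geom_dont_apply
# load_menpo_image_list_no_texture  -> augment_menpo_img_ns_dont_apply + augment_menpo_img_geom
# load_menpo_image_list_no_artistic -> augment_menpo_img_ns_dont_apply + augment_menpo_img_geom_dont_apply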
MakeItTalk/thirdparty/face_of_art/old/temp/deep_heatmaps_model_primary_net.py
CHANGED
The diff for this file is too large to render.
See raw diff
MakeItTalk/thirdparty/face_of_art/old/temp/main_primary.py
CHANGED
@@ -1,121 +1,121 @@
import tensorflow as tf
from deep_heatmaps_model_primary_net import DeepHeatmapsModel
import os


data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
pre_train_path = 'saved_models/0.01/model/deep_heatmaps-50000'
output_dir = os.getcwd()

flags = tf.app.flags

# mode and logging parameters
flags.DEFINE_string('mode', 'TRAIN', "'TRAIN' or 'TEST'")
flags.DEFINE_integer('print_every', 100, "print losses to screen + log every X steps")
flags.DEFINE_integer('save_every', 20000, "save model every X steps")
flags.DEFINE_integer('sample_every', 5000, "sample heatmaps + landmark predictions every X steps")
flags.DEFINE_integer('sample_grid', 9, 'number of training images in sample')
flags.DEFINE_bool('sample_to_log', True, 'samples will be saved to tensorboard log')
flags.DEFINE_integer('valid_size', 9, 'number of validation images to run')
flags.DEFINE_integer('log_valid_every', 10, 'evaluate on valid set every X epochs')
flags.DEFINE_integer('debug_data_size', 20, 'subset data size to test in debug mode')
flags.DEFINE_bool('debug', False, 'run in debug mode - use subset of the data')

# define paths
flags.DEFINE_string('output_dir', output_dir, "directory for saving models, logs and samples")
flags.DEFINE_string('save_model_path', 'model', "directory for saving the model")
flags.DEFINE_string('save_sample_path', 'sample', "directory for saving the sampled images")
flags.DEFINE_string('save_log_path', 'logs', "directory for saving the log file")
flags.DEFINE_string('img_path', data_dir, "data directory")
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-50000', "saved model to test")
flags.DEFINE_string('test_data', 'full', 'test set to use: full/common/challenging/test/art')
flags.DEFINE_string('valid_data', 'full', 'validation set to use: full/common/challenging/test/art')
flags.DEFINE_string('train_crop_dir', 'crop_gt_margin_0.25', "directory of train images cropped to bb (+margin)")
flags.DEFINE_string('img_dir_ns', 'crop_gt_margin_0.25_ns', "dir of train imgs cropped to bb + style transfer")
flags.DEFINE_string('epoch_data_dir', 'epoch_data', "directory containing pre-augmented data for each epoch")
flags.DEFINE_bool('use_epoch_data', False, "use pre-augmented data")

# pretrain parameters (for fine-tuning / resume training)
flags.DEFINE_string('pre_train_path', pre_train_path, 'pretrained model path')
flags.DEFINE_bool('load_pretrain', False, "load pretrained weight?")

# input data parameters
flags.DEFINE_integer('image_size', 256, "image size")
flags.DEFINE_integer('c_dim', 3, "color channels")
flags.DEFINE_integer('num_landmarks', 68, "number of face landmarks")
flags.DEFINE_float('sigma', 1.5, "std for heatmap generation gaussian")
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt':for ground truth / 'init':for face detector output")
flags.DEFINE_bool('approx_maps', True, 'use heatmap approximation - major speed up')
flags.DEFINE_float('win_mult', 3.33335, 'gaussian filter size for approx maps: 2 * sigma * win_mult + 1')

# optimization parameters
flags.DEFINE_integer('train_iter', 100000, 'maximum training iterations')
flags.DEFINE_integer('batch_size', 10, "batch_size")
flags.DEFINE_float('learning_rate', 1e-4, "initial learning rate")
flags.DEFINE_bool('adam_optimizer', True, "use adam optimizer (if False momentum optimizer is used)")
flags.DEFINE_float('momentum', 0.95, "optimizer momentum (if adam_optimizer==False)")
flags.DEFINE_integer('step', 100000, 'step for lr decay')
flags.DEFINE_float('gamma', 0.1, 'exponential base for lr decay')
flags.DEFINE_float('reg', 1e-5, 'scalar multiplier for weight decay (0 to disable)')
flags.DEFINE_string('weight_initializer', 'xavier', 'weight initializer: random_normal / xavier')
flags.DEFINE_float('weight_initializer_std', 0.01, 'std for random_normal weight initializer')
flags.DEFINE_float('bias_initializer', 0.0, 'constant value for bias initializer')

# augmentation parameters
flags.DEFINE_bool('augment_basic', True, "use basic augmentation?")
flags.DEFINE_integer('basic_start', 0, 'min epoch to start basic augmentation')
flags.DEFINE_bool('augment_texture', False, "use artistic texture augmentation?")
flags.DEFINE_float('p_texture', 0., 'initial probability of artistic texture augmentation')
flags.DEFINE_bool('augment_geom', False, "use artistic geometric augmentation?")
flags.DEFINE_float('p_geom', 0., 'initial probability of artistic geometric augmentation')
flags.DEFINE_integer('artistic_step', -1, 'step for increasing probability of artistic augmentation in epochs')
flags.DEFINE_integer('artistic_start', 0, 'min epoch to start artistic augmentation')


FLAGS = flags.FLAGS

if not os.path.exists(FLAGS.output_dir):
    os.mkdir(FLAGS.output_dir)


def main(_):

    save_model_path = os.path.join(FLAGS.output_dir, FLAGS.save_model_path)
    save_sample_path = os.path.join(FLAGS.output_dir, FLAGS.save_sample_path)
    save_log_path = os.path.join(FLAGS.output_dir, FLAGS.save_log_path)

    # create directories if they do not exist
    if not os.path.exists(save_model_path):
        os.mkdir(save_model_path)
    if not os.path.exists(save_log_path):
        os.mkdir(save_log_path)
    if not os.path.exists(save_sample_path) and (not FLAGS.sample_to_log or FLAGS.mode != 'TRAIN'):
        os.mkdir(save_sample_path)

    model = DeepHeatmapsModel(
        mode=FLAGS.mode, train_iter=FLAGS.train_iter, batch_size=FLAGS.batch_size, learning_rate=FLAGS.learning_rate,
        adam_optimizer=FLAGS.adam_optimizer, momentum=FLAGS.momentum, step=FLAGS.step, gamma=FLAGS.gamma, reg=FLAGS.reg,
        weight_initializer=FLAGS.weight_initializer, weight_initializer_std=FLAGS.weight_initializer_std,
        bias_initializer=FLAGS.bias_initializer, image_size=FLAGS.image_size, c_dim=FLAGS.c_dim,
        num_landmarks=FLAGS.num_landmarks, sigma=FLAGS.sigma, scale=FLAGS.scale, margin=FLAGS.margin,
        bb_type=FLAGS.bb_type, approx_maps=FLAGS.approx_maps, win_mult=FLAGS.win_mult, augment_basic=FLAGS.augment_basic,
        basic_start=FLAGS.basic_start, augment_texture=FLAGS.augment_texture, p_texture=FLAGS.p_texture,
        augment_geom=FLAGS.augment_geom, p_geom=FLAGS.p_geom, artistic_step=FLAGS.artistic_step,
        artistic_start=FLAGS.artistic_start, output_dir=FLAGS.output_dir, save_model_path=save_model_path,
        save_sample_path=save_sample_path, save_log_path=save_log_path, test_model_path=FLAGS.test_model_path,
        pre_train_path=FLAGS.pre_train_path, load_pretrain=FLAGS.load_pretrain, img_path=FLAGS.img_path,
        test_data=FLAGS.test_data, valid_data=FLAGS.valid_data, valid_size=FLAGS.valid_size,
        log_valid_every=FLAGS.log_valid_every, train_crop_dir=FLAGS.train_crop_dir, img_dir_ns=FLAGS.img_dir_ns,
        print_every=FLAGS.print_every, save_every=FLAGS.save_every, sample_every=FLAGS.sample_every,
        sample_grid=FLAGS.sample_grid, sample_to_log=FLAGS.sample_to_log, debug_data_size=FLAGS.debug_data_size,
        debug=FLAGS.debug, use_epoch_data=FLAGS.use_epoch_data, epoch_data_dir=FLAGS.epoch_data_dir)

    if FLAGS.mode == 'TRAIN':
        model.train()
    else:
        model.eval()

if __name__ == '__main__':
    tf.app.run()
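
One worked number from these defaults: the win_mult help string defines the approximation filter size as 2 * sigma * win_mult + 1, so with sigma=1.5 the gaussian window comes out to 11 pixels; a quick sketch to check:

sigma, win_mult = 1.5, 3.33335
print(2 * sigma * win_mult + 1)  # 11.00005 -> an 11-pixel window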
MakeItTalk/thirdparty/face_of_art/old/temp/predict_landmarks.py
CHANGED
@@ -1,100 +1,100 @@
from menpo_functions import *
from deep_heatmaps_model_fusion_net import DeepHeatmapsModel
import os
import pickle

# directory for saving predictions
out_dir = '/Users/arik/Desktop/out/'
if not os.path.exists(out_dir):
    os.mkdir(out_dir)

# directory with conventional landmark detection datasets (for bounding box files)
conv_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'

# bounding box type for conventional landmark detection datasets (gt / init)
bb_type = 'init'

# directory with clm models for the tuning step
clm_path = 'pdm_clm_models/clm_models/g_t_all'

# directory with pdm models for the correction step
pdm_path = 'pdm_clm_models/pdm_models/'

# model path
model_path = '/Users/arik/Dropbox/Thesis_dropbox/models/model_train_wiki/model/deep_heatmaps-60000'

# directory containing test sets
data_dir = '/Users/arik/Dropbox/a_mac_thesis/artistic_faces/artistic_face_dataset/'
test_sets = ['all_AF']  # test sets to evaluate

# data_dir = '/Users/arik/Desktop/Thesis_mac/semi_art_sets/semi_art_sets_wiki_train_2/'
# test_sets = [
#     'challenging_set_aug_geom_texture',
#     'common_set_aug_geom_texture',
#     'test_set_aug_geom_texture',
#     'full_set_aug_geom_texture'
# ]

# load heatmap model
heatmap_model = DeepHeatmapsModel(
    mode='TEST', img_path=conv_dir, test_model_path=model_path, menpo_verbose=False, scale=1)

bb_dir = os.path.join(conv_dir, 'Bounding_Boxes')

# predict landmarks for input test sets
for i, test_data in enumerate(test_sets):

    # build the graph on the first pass, reuse its variables afterwards
    if i == 0:
        reuse = None
    else:
        reuse = True

    out_temp = os.path.join(out_dir, test_data)
    if not os.path.exists(out_temp):
        os.mkdir(out_temp)

    bb_dictionary = load_bb_dictionary(bb_dir, mode='TEST', test_data=test_data)

    img_list = load_menpo_image_list(img_dir=data_dir, train_crop_dir=data_dir, img_dir_ns=data_dir, mode='TEST',
                                     test_data=test_data, bb_type=bb_type, bb_dictionary=bb_dictionary)

    img_list = img_list[:10]
    print(test_data + ':' + str(len(img_list)) + ' images')

    preds = heatmap_model.get_landmark_predictions(img_list=img_list, pdm_models_dir=pdm_path,
                                                   clm_model_path=clm_path, reuse=reuse)

    init_lms = preds['E']
    ppdm_lms = preds['ECp']
    clm_lms = preds['ECpT']
    ect_lms = preds['ECT']
    ecptp_jaw_lms = preds['ECpTp_jaw']
    ecptp_out_lms = preds['ECpTp_out']

    # pickle each prediction variant under its own file name
    for name, lms in [('E_lms', init_lms), ('ECp_lms', ppdm_lms), ('ECpT_lms', clm_lms),
                      ('ECT_lms', ect_lms), ('ECpTp_jaw_lms', ecptp_jaw_lms),
                      ('ECpTp_out_lms', ecptp_out_lms)]:
        with open(os.path.join(out_temp, name), "wb") as filehandler:
            pickle.dump(lms, filehandler)

print("\nDone!\n")
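
Reading a dump back mirrors the write; a minimal sketch reusing the names above:

# reload the pickled initial-estimate landmarks written above
with open(os.path.join(out_temp, 'E_lms'), "rb") as f:
    init_lms = pickle.load(f)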
MakeItTalk/thirdparty/face_of_art/old/temp/run_tests_fusion.py
CHANGED
@@ -1,136 +1,136 @@
|
|
1 |
-
import tensorflow as tf
|
2 |
-
from deep_heatmaps_model_fusion_net import DeepHeatmapsModel
|
3 |
-
import os
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
|
7 |
-
data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
|
8 |
-
output_dir = 'tests_fusion'
|
9 |
-
pre_train_model_name = 'deep_heatmaps-50000'
|
10 |
-
num_tests = 5
|
11 |
-
params = np.logspace(-4, -6, num_tests)
|
12 |
-
|
13 |
-
flags = tf.app.flags
|
14 |
-
|
15 |
-
# mode and logging parameters
|
16 |
-
flags.DEFINE_integer('print_every', 100, "print losses to screen + log every X steps")
|
17 |
-
flags.DEFINE_integer('save_every', 5000, "save model every X steps")
|
18 |
-
flags.DEFINE_integer('sample_every', 5000, "sample heatmaps + landmark predictions every X steps")
|
19 |
-
flags.DEFINE_integer('sample_grid', 4, 'number of training images in sample')
|
20 |
-
flags.DEFINE_bool('sample_to_log', True, 'samples will be saved to tensorboard log')
|
21 |
-
flags.DEFINE_integer('valid_size', 0, 'number of validation images to run')
|
22 |
-
flags.DEFINE_integer('log_valid_every', 5, 'evaluate on valid set every X epochs')
|
23 |
-
flags.DEFINE_integer('debug_data_size', 20, 'subset data size to test in debug mode')
|
24 |
-
flags.DEFINE_bool('debug', False, 'run in debug mode - use subset of the data')
|
25 |
-
|
26 |
-
# define paths
|
27 |
-
flags.DEFINE_string('output_dir', output_dir, "directory for saving models, logs and samples")
|
28 |
-
flags.DEFINE_string('img_path', data_dir, "data directory")
|
29 |
-
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-50000', "saved model to test")
|
30 |
-
flags.DEFINE_string('test_data', 'full', 'test set to use: full/common/challenging/test/art')
|
31 |
-
flags.DEFINE_string('valid_data', 'full', 'validation set to use: full/common/challenging/test/art')
|
32 |
-
flags.DEFINE_string('train_crop_dir', 'crop_gt_margin_0.25',"directory of train images cropped to bb (+margin)")
|
33 |
-
flags.DEFINE_string('img_dir_ns', 'crop_gt_margin_0.25_ns',"dir of train imgs cropped to bb + style transfer")
|
34 |
-
flags.DEFINE_string('epoch_data_dir', 'epoch_data', "directory containing pre-augmented data for each epoch")
|
35 |
-
flags.DEFINE_bool('use_epoch_data', False, "use pre-augmented data")
|
36 |
-
|
37 |
-
|
38 |
-
# pretrain parameters (for fine-tuning / resume training)
|
39 |
-
flags.DEFINE_string('pre_train_model_name', pre_train_model_name, 'pretrained model name (e.g. deep_heatmaps-50000')
|
40 |
-
flags.DEFINE_bool('load_pretrain', False, "load pretrained weight?")
|
41 |
-
flags.DEFINE_bool('load_primary_only', False, 'fine-tuning using only primary network weights')
|
42 |
-
|
43 |
-
# input data parameters
|
44 |
-
flags.DEFINE_integer('image_size', 256, "image size")
|
45 |
-
flags.DEFINE_integer('c_dim', 3, "color channels")
|
46 |
-
flags.DEFINE_integer('num_landmarks', 68, "number of face landmarks")
|
47 |
-
flags.DEFINE_float('sigma', 6, "std for heatmap generation gaussian")
|
48 |
-
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
|
49 |
-
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
|
50 |
-
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt':for ground truth / 'init':for face detector output")
|
51 |
-
flags.DEFINE_bool('approx_maps', True, 'use heatmap approximation - major speed up')
|
52 |
-
flags.DEFINE_float('win_mult', 3.33335, 'gaussian filter size for approx maps: 2 * sigma * win_mult + 1')
|
53 |
-
|
54 |
-
# optimization parameters
|
55 |
-
flags.DEFINE_float('l_weight_primary', 1., 'primary loss weight')
|
56 |
-
flags.DEFINE_float('l_weight_fusion', 0., 'fusion loss weight')
|
57 |
-
flags.DEFINE_float('l_weight_upsample', 3., 'upsample loss weight')
|
58 |
-
flags.DEFINE_integer('train_iter', 100000, 'maximum training iterations')
|
59 |
-
flags.DEFINE_integer('batch_size', 6, "batch_size")
|
60 |
-
flags.DEFINE_float('learning_rate', 1e-4, "initial learning rate")
|
61 |
-
flags.DEFINE_bool('adam_optimizer', True, "use adam optimizer (if False momentum optimizer is used)")
|
62 |
-
flags.DEFINE_float('momentum', 0.95, "optimizer momentum (if adam_optimizer==False)")
|
63 |
-
flags.DEFINE_integer('step', 100000, 'step for lr decay')
|
64 |
-
flags.DEFINE_float('gamma', 0.1, 'exponential base for lr decay')
|
65 |
-
flags.DEFINE_float('reg', 0, 'scalar multiplier for weight decay (0 to disable)')
|
66 |
-
flags.DEFINE_string('weight_initializer','xavier', 'weight initializer: random_normal / xavier')
|
67 |
-
flags.DEFINE_float('weight_initializer_std', 0.01, 'std for random_normal weight initializer')
|
68 |
-
flags.DEFINE_float('bias_initializer', 0.0, 'constant value for bias initializer')
|
69 |
-
|
70 |
-
# augmentation parameters
|
71 |
-
flags.DEFINE_bool('augment_basic', True,"use basic augmentation?")
|
72 |
-
flags.DEFINE_integer('basic_start', 0, 'min epoch to start basic augmentation')
|
73 |
-
flags.DEFINE_bool('augment_texture', False, "use artistic texture augmentation?")
|
74 |
-
flags.DEFINE_float('p_texture', 0., 'initial probability of artistic texture augmentation')
|
75 |
-
flags.DEFINE_bool('augment_geom', False,"use artistic geometric augmentation?")
|
76 |
-
flags.DEFINE_float('p_geom', 0., 'initial probability of artistic geometric augmentation')
|
77 |
-
flags.DEFINE_integer('artistic_step', -1, 'step for increasing probability of artistic augmentation in epochs')
|
78 |
-
flags.DEFINE_integer('artistic_start', 0, 'min epoch to start artistic augmentation')
|
79 |
-
|
80 |
-
|
81 |
-
FLAGS = flags.FLAGS
|
82 |
-
|
83 |
-
if not os.path.exists(FLAGS.output_dir):
|
84 |
-
os.mkdir(FLAGS.output_dir)
|
85 |
-
|
86 |
-
|
87 |
-
def main(_):
|
88 |
-
|
89 |
-
for i, param in enumerate(params):
|
90 |
-
|
91 |
-
test_dir = os.path.join(FLAGS.output_dir, str(param))
|
92 |
-
if not os.path.exists(test_dir):
|
93 |
-
os.mkdir(test_dir)
|
94 |
-
|
95 |
-
print ('\n##### RUNNING TESTS FUSION (%d/%d) #####' % (i + 1, len(params)))
|
96 |
-
print ('##### current directory: ' + test_dir)
|
97 |
-
|
98 |
-
save_model_path = os.path.join(test_dir,'model')
|
99 |
-
save_sample_path = os.path.join(test_dir, 'sample')
|
100 |
-
save_log_path = os.path.join(test_dir, 'logs')
|
101 |
-
|
102 |
-
# create directories if not exist
|
103 |
-
if not os.path.exists(save_model_path):
|
104 |
-
os.mkdir(save_model_path)
|
105 |
-
if not os.path.exists(save_log_path):
|
106 |
-
os.mkdir(save_log_path)
|
107 |
-
if not os.path.exists(save_sample_path) and not FLAGS.sample_to_log:
|
108 |
-
os.mkdir(save_sample_path)
|
109 |
-
|
110 |
-
tf.reset_default_graph() # reset graph
|
111 |
-
|
112 |
-
model = DeepHeatmapsModel(
|
113 |
-
mode='TRAIN', train_iter=FLAGS.train_iter, batch_size=FLAGS.batch_size, learning_rate=param,
|
114 |
-
l_weight_primary=FLAGS.l_weight_primary, l_weight_fusion=FLAGS.l_weight_fusion,
|
115 |
-
l_weight_upsample=FLAGS.l_weight_upsample, reg=FLAGS.reg,
|
116 |
-
adam_optimizer=FLAGS.adam_optimizer, momentum=FLAGS.momentum, step=FLAGS.step, gamma=FLAGS.gamma,
|
117 |
-
weight_initializer=FLAGS.weight_initializer, weight_initializer_std=FLAGS.weight_initializer_std,
|
118 |
-
bias_initializer=FLAGS.bias_initializer, image_size=FLAGS.image_size, c_dim=FLAGS.c_dim,
|
119 |
-
num_landmarks=FLAGS.num_landmarks, sigma=FLAGS.sigma, scale=FLAGS.scale, margin=FLAGS.margin,
|
120 |
-
bb_type=FLAGS.bb_type, approx_maps=FLAGS.approx_maps, win_mult=FLAGS.win_mult,
|
121 |
-
augment_basic=FLAGS.augment_basic, basic_start=FLAGS.basic_start, augment_texture=FLAGS.augment_texture,
|
122 |
-
p_texture=FLAGS.p_texture, augment_geom=FLAGS.augment_geom, p_geom=FLAGS.p_geom, artistic_step=FLAGS.artistic_step,
|
123 |
-
artistic_start=FLAGS.artistic_start, output_dir=FLAGS.output_dir, save_model_path=save_model_path,
|
124 |
-
save_sample_path=save_sample_path, save_log_path=save_log_path, test_model_path=FLAGS.test_model_path,
|
125 |
-
pre_train_path=os.path.join(save_model_path, FLAGS.pre_train_model_name), load_pretrain=FLAGS.load_pretrain,
|
126 |
-
load_primary_only=FLAGS.load_primary_only, img_path=FLAGS.img_path, test_data=FLAGS.test_data,
|
127 |
-
valid_data=FLAGS.valid_data, valid_size=FLAGS.valid_size, log_valid_every=FLAGS.log_valid_every,
|
128 |
-
train_crop_dir=FLAGS.train_crop_dir, img_dir_ns=FLAGS.img_dir_ns, print_every=FLAGS.print_every,
|
129 |
-
save_every=FLAGS.save_every, sample_every=FLAGS.sample_every, sample_grid=FLAGS.sample_grid,
|
130 |
-
sample_to_log=FLAGS.sample_to_log, debug_data_size=FLAGS.debug_data_size, debug=FLAGS.debug,
|
131 |
-
use_epoch_data = FLAGS.use_epoch_data, epoch_data_dir = FLAGS.epoch_data_dir)
|
132 |
-
|
133 |
-
model.train()
|
134 |
-
|
135 |
-
if __name__ == '__main__':
|
136 |
-
tf.app.run()
|
|
|
1 |
+
import tensorflow as tf
|
2 |
+
from deep_heatmaps_model_fusion_net import DeepHeatmapsModel
|
3 |
+
import os
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
|
7 |
+
data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
|
8 |
+
output_dir = 'tests_fusion'
|
9 |
+
pre_train_model_name = 'deep_heatmaps-50000'
|
10 |
+
num_tests = 5
|
11 |
+
params = np.logspace(-4, -6, num_tests)
|
12 |
+
|
13 |
+
flags = tf.app.flags
|
14 |
+
|
15 |
+
# mode and logging parameters
|
16 |
+
flags.DEFINE_integer('print_every', 100, "print losses to screen + log every X steps")
|
17 |
+
flags.DEFINE_integer('save_every', 5000, "save model every X steps")
|
18 |
+
flags.DEFINE_integer('sample_every', 5000, "sample heatmaps + landmark predictions every X steps")
|
19 |
+
flags.DEFINE_integer('sample_grid', 4, 'number of training images in sample')
|
20 |
+
flags.DEFINE_bool('sample_to_log', True, 'samples will be saved to tensorboard log')
|
21 |
+
flags.DEFINE_integer('valid_size', 0, 'number of validation images to run')
|
22 |
+
flags.DEFINE_integer('log_valid_every', 5, 'evaluate on valid set every X epochs')
|
23 |
+
flags.DEFINE_integer('debug_data_size', 20, 'subset data size to test in debug mode')
|
24 |
+
flags.DEFINE_bool('debug', False, 'run in debug mode - use subset of the data')
|
25 |
+
|
26 |
+
# define paths
|
27 |
+
flags.DEFINE_string('output_dir', output_dir, "directory for saving models, logs and samples")
|
28 |
+
flags.DEFINE_string('img_path', data_dir, "data directory")
|
29 |
+
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-50000', "saved model to test")
|
30 |
+
flags.DEFINE_string('test_data', 'full', 'test set to use: full/common/challenging/test/art')
|
31 |
+
flags.DEFINE_string('valid_data', 'full', 'validation set to use: full/common/challenging/test/art')
|
32 |
+
flags.DEFINE_string('train_crop_dir', 'crop_gt_margin_0.25',"directory of train images cropped to bb (+margin)")
|
33 |
+
flags.DEFINE_string('img_dir_ns', 'crop_gt_margin_0.25_ns',"dir of train imgs cropped to bb + style transfer")
|
34 |
+
flags.DEFINE_string('epoch_data_dir', 'epoch_data', "directory containing pre-augmented data for each epoch")
|
35 |
+
flags.DEFINE_bool('use_epoch_data', False, "use pre-augmented data")

# pretrain parameters (for fine-tuning / resume training)
flags.DEFINE_string('pre_train_model_name', pre_train_model_name, 'pretrained model name (e.g. deep_heatmaps-50000)')
flags.DEFINE_bool('load_pretrain', False, "load pretrained weights?")
flags.DEFINE_bool('load_primary_only', False, 'fine-tuning using only primary network weights')

# input data parameters
flags.DEFINE_integer('image_size', 256, "image size")
flags.DEFINE_integer('c_dim', 3, "color channels")
flags.DEFINE_integer('num_landmarks', 68, "number of face landmarks")
flags.DEFINE_float('sigma', 6, "std for heatmap generation gaussian")
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt': ground truth / 'init': face detector output")
flags.DEFINE_bool('approx_maps', True, 'use heatmap approximation - major speed-up')
flags.DEFINE_float('win_mult', 3.33335, 'gaussian filter size for approx maps: 2 * sigma * win_mult + 1')

# optimization parameters
flags.DEFINE_float('l_weight_primary', 1., 'primary loss weight')
flags.DEFINE_float('l_weight_fusion', 0., 'fusion loss weight')
flags.DEFINE_float('l_weight_upsample', 3., 'upsample loss weight')
flags.DEFINE_integer('train_iter', 100000, 'maximum training iterations')
flags.DEFINE_integer('batch_size', 6, "batch size")
flags.DEFINE_float('learning_rate', 1e-4, "initial learning rate")
flags.DEFINE_bool('adam_optimizer', True, "use adam optimizer (if False, momentum optimizer is used)")
flags.DEFINE_float('momentum', 0.95, "optimizer momentum (if adam_optimizer==False)")
flags.DEFINE_integer('step', 100000, 'step for lr decay')
flags.DEFINE_float('gamma', 0.1, 'exponential base for lr decay')
flags.DEFINE_float('reg', 0, 'scalar multiplier for weight decay (0 to disable)')
flags.DEFINE_string('weight_initializer', 'xavier', 'weight initializer: random_normal / xavier')
flags.DEFINE_float('weight_initializer_std', 0.01, 'std for random_normal weight initializer')
flags.DEFINE_float('bias_initializer', 0.0, 'constant value for bias initializer')

# augmentation parameters
flags.DEFINE_bool('augment_basic', True, "use basic augmentation?")
flags.DEFINE_integer('basic_start', 0, 'min epoch to start basic augmentation')
flags.DEFINE_bool('augment_texture', False, "use artistic texture augmentation?")
flags.DEFINE_float('p_texture', 0., 'initial probability of artistic texture augmentation')
flags.DEFINE_bool('augment_geom', False, "use artistic geometric augmentation?")
flags.DEFINE_float('p_geom', 0., 'initial probability of artistic geometric augmentation')
flags.DEFINE_integer('artistic_step', -1, 'step (in epochs) for increasing probability of artistic augmentation')
flags.DEFINE_integer('artistic_start', 0, 'min epoch to start artistic augmentation')


FLAGS = flags.FLAGS

if not os.path.exists(FLAGS.output_dir):
    os.mkdir(FLAGS.output_dir)


def main(_):

    for i, param in enumerate(params):

        test_dir = os.path.join(FLAGS.output_dir, str(param))
        if not os.path.exists(test_dir):
            os.mkdir(test_dir)

        print('\n##### RUNNING TESTS FUSION (%d/%d) #####' % (i + 1, len(params)))
        print('##### current directory: ' + test_dir)

        save_model_path = os.path.join(test_dir, 'model')
        save_sample_path = os.path.join(test_dir, 'sample')
        save_log_path = os.path.join(test_dir, 'logs')

        # create directories if they do not exist
        if not os.path.exists(save_model_path):
            os.mkdir(save_model_path)
        if not os.path.exists(save_log_path):
            os.mkdir(save_log_path)
        if not os.path.exists(save_sample_path) and not FLAGS.sample_to_log:
            os.mkdir(save_sample_path)

        tf.reset_default_graph()  # reset graph

        model = DeepHeatmapsModel(
            mode='TRAIN', train_iter=FLAGS.train_iter, batch_size=FLAGS.batch_size, learning_rate=param,
            l_weight_primary=FLAGS.l_weight_primary, l_weight_fusion=FLAGS.l_weight_fusion,
            l_weight_upsample=FLAGS.l_weight_upsample, reg=FLAGS.reg,
            adam_optimizer=FLAGS.adam_optimizer, momentum=FLAGS.momentum, step=FLAGS.step, gamma=FLAGS.gamma,
            weight_initializer=FLAGS.weight_initializer, weight_initializer_std=FLAGS.weight_initializer_std,
            bias_initializer=FLAGS.bias_initializer, image_size=FLAGS.image_size, c_dim=FLAGS.c_dim,
            num_landmarks=FLAGS.num_landmarks, sigma=FLAGS.sigma, scale=FLAGS.scale, margin=FLAGS.margin,
            bb_type=FLAGS.bb_type, approx_maps=FLAGS.approx_maps, win_mult=FLAGS.win_mult,
            augment_basic=FLAGS.augment_basic, basic_start=FLAGS.basic_start, augment_texture=FLAGS.augment_texture,
            p_texture=FLAGS.p_texture, augment_geom=FLAGS.augment_geom, p_geom=FLAGS.p_geom,
            artistic_step=FLAGS.artistic_step, artistic_start=FLAGS.artistic_start, output_dir=FLAGS.output_dir,
            save_model_path=save_model_path, save_sample_path=save_sample_path, save_log_path=save_log_path,
            test_model_path=FLAGS.test_model_path,
            pre_train_path=os.path.join(save_model_path, FLAGS.pre_train_model_name), load_pretrain=FLAGS.load_pretrain,
            load_primary_only=FLAGS.load_primary_only, img_path=FLAGS.img_path, test_data=FLAGS.test_data,
            valid_data=FLAGS.valid_data, valid_size=FLAGS.valid_size, log_valid_every=FLAGS.log_valid_every,
            train_crop_dir=FLAGS.train_crop_dir, img_dir_ns=FLAGS.img_dir_ns, print_every=FLAGS.print_every,
            save_every=FLAGS.save_every, sample_every=FLAGS.sample_every, sample_grid=FLAGS.sample_grid,
            sample_to_log=FLAGS.sample_to_log, debug_data_size=FLAGS.debug_data_size, debug=FLAGS.debug,
            use_epoch_data=FLAGS.use_epoch_data, epoch_data_dir=FLAGS.epoch_data_dir)

        model.train()


if __name__ == '__main__':
    tf.app.run()

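The sweep that main() iterates over comes from params = np.logspace(-4, -6, num_tests) near the top of the script, and each run writes into a directory named str(param) under FLAGS.output_dir. As a quick worked check of those values (not part of the repo, just the np.logspace arithmetic):

import numpy as np

# five log-spaced learning rates from 1e-4 down to 1e-6 (inclusive endpoints)
print(np.logspace(-4, -6, 5))
# [1.00000000e-04 3.16227766e-05 1.00000000e-05 3.16227766e-06 1.00000000e-06]
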
MakeItTalk/thirdparty/face_of_art/old/temp/run_tests_primary.py
CHANGED
@@ -1,130 +1,130 @@
(whitespace-only change; both sides of the diff contain the same 130 lines, shown once below)

import tensorflow as tf
from deep_heatmaps_model_primary_net import DeepHeatmapsModel
import os
import numpy as np


data_dir = '/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'
output_dir = 'tests_primary'
pre_train_model_name = 'deep_heatmaps-50000'
num_tests = 5
params = np.logspace(-4, -6, num_tests)

flags = tf.app.flags

# mode and logging parameters
flags.DEFINE_integer('print_every', 100, "print losses to screen + log every X steps")
flags.DEFINE_integer('save_every', 5000, "save model every X steps")
flags.DEFINE_integer('sample_every', 5000, "sample heatmaps + landmark predictions every X steps")
flags.DEFINE_integer('sample_grid', 9, 'number of training images in sample')
flags.DEFINE_bool('sample_to_log', True, 'samples will be saved to tensorboard log')
flags.DEFINE_integer('valid_size', 0, 'number of validation images to run')
flags.DEFINE_integer('log_valid_every', 5, 'evaluate on valid set every X epochs')
flags.DEFINE_integer('debug_data_size', 20, 'subset data size to test in debug mode')
flags.DEFINE_bool('debug', False, 'run in debug mode - use subset of the data')

# define paths
flags.DEFINE_string('output_dir', output_dir, "directory for saving models, logs and samples")
flags.DEFINE_string('img_path', data_dir, "data directory")
flags.DEFINE_string('test_model_path', 'model/deep_heatmaps-50000', "saved model to test")
flags.DEFINE_string('test_data', 'full', 'test set to use: full/common/challenging/test/art')
flags.DEFINE_string('valid_data', 'full', 'validation set to use: full/common/challenging/test/art')
flags.DEFINE_string('train_crop_dir', 'crop_gt_margin_0.25', "directory of train images cropped to bb (+margin)")
flags.DEFINE_string('img_dir_ns', 'crop_gt_margin_0.25_ns', "dir of train imgs cropped to bb + style transfer")
flags.DEFINE_string('epoch_data_dir', 'epoch_data', "directory containing pre-augmented data for each epoch")
flags.DEFINE_bool('use_epoch_data', False, "use pre-augmented data")

# pretrain parameters (for fine-tuning / resume training)
flags.DEFINE_string('pre_train_model_name', pre_train_model_name, 'pretrained model name (e.g. deep_heatmaps-50000)')
flags.DEFINE_bool('load_pretrain', False, "load pretrained weights?")

# input data parameters
flags.DEFINE_integer('image_size', 256, "image size")
flags.DEFINE_integer('c_dim', 3, "color channels")
flags.DEFINE_integer('num_landmarks', 68, "number of face landmarks")
flags.DEFINE_float('sigma', 1.5, "std for heatmap generation gaussian")
flags.DEFINE_integer('scale', 1, 'scale for image normalization 255/1/0')
flags.DEFINE_float('margin', 0.25, 'margin for face crops - % of bb size')
flags.DEFINE_string('bb_type', 'gt', "bb to use - 'gt': ground truth / 'init': face detector output")
flags.DEFINE_bool('approx_maps', True, 'use heatmap approximation - major speed-up')
flags.DEFINE_float('win_mult', 3.33335, 'gaussian filter size for approx maps: 2 * sigma * win_mult + 1')

# optimization parameters
flags.DEFINE_integer('train_iter', 100000, 'maximum training iterations')
flags.DEFINE_integer('batch_size', 10, "batch size")
flags.DEFINE_float('learning_rate', 1e-4, "initial learning rate")
flags.DEFINE_bool('adam_optimizer', True, "use adam optimizer (if False, momentum optimizer is used)")
flags.DEFINE_float('momentum', 0.95, "optimizer momentum (if adam_optimizer==False)")
flags.DEFINE_integer('step', 100000, 'step for lr decay')
flags.DEFINE_float('gamma', 0.1, 'exponential base for lr decay')
flags.DEFINE_float('reg', 0, 'scalar multiplier for weight decay (0 to disable)')
flags.DEFINE_string('weight_initializer', 'xavier', 'weight initializer: random_normal / xavier')
flags.DEFINE_float('weight_initializer_std', 0.01, 'std for random_normal weight initializer')
flags.DEFINE_float('bias_initializer', 0.0, 'constant value for bias initializer')

# augmentation parameters
flags.DEFINE_bool('augment_basic', True, "use basic augmentation?")
flags.DEFINE_integer('basic_start', 0, 'min epoch to start basic augmentation')
flags.DEFINE_bool('augment_texture', False, "use artistic texture augmentation?")
flags.DEFINE_float('p_texture', 0., 'initial probability of artistic texture augmentation')
flags.DEFINE_bool('augment_geom', False, "use artistic geometric augmentation?")
flags.DEFINE_float('p_geom', 0., 'initial probability of artistic geometric augmentation')
flags.DEFINE_integer('artistic_step', -1, 'step (in epochs) for increasing probability of artistic augmentation')
flags.DEFINE_integer('artistic_start', 0, 'min epoch to start artistic augmentation')


FLAGS = flags.FLAGS

if not os.path.exists(FLAGS.output_dir):
    os.mkdir(FLAGS.output_dir)


def main(_):

    for i, param in enumerate(params):

        test_dir = os.path.join(FLAGS.output_dir, str(param))
        if not os.path.exists(test_dir):
            os.mkdir(test_dir)

        print('\n##### RUNNING TESTS PRIMARY (%d/%d) #####' % (i + 1, len(params)))
        print('##### current directory: ' + test_dir)

        save_model_path = os.path.join(test_dir, 'model')
        save_sample_path = os.path.join(test_dir, 'sample')
        save_log_path = os.path.join(test_dir, 'logs')

        # create directories if they do not exist
        if not os.path.exists(save_model_path):
            os.mkdir(save_model_path)
        if not os.path.exists(save_log_path):
            os.mkdir(save_log_path)
        if not os.path.exists(save_sample_path) and not FLAGS.sample_to_log:
            os.mkdir(save_sample_path)

        tf.reset_default_graph()  # reset graph

        model = DeepHeatmapsModel(
            mode='TRAIN', train_iter=FLAGS.train_iter, batch_size=FLAGS.batch_size, learning_rate=param,
            adam_optimizer=FLAGS.adam_optimizer, momentum=FLAGS.momentum, step=FLAGS.step, gamma=FLAGS.gamma,
            reg=FLAGS.reg, weight_initializer=FLAGS.weight_initializer, weight_initializer_std=FLAGS.weight_initializer_std,
            bias_initializer=FLAGS.bias_initializer, image_size=FLAGS.image_size, c_dim=FLAGS.c_dim,
            num_landmarks=FLAGS.num_landmarks, sigma=FLAGS.sigma, scale=FLAGS.scale, margin=FLAGS.margin,
            bb_type=FLAGS.bb_type, approx_maps=FLAGS.approx_maps, win_mult=FLAGS.win_mult,
            augment_basic=FLAGS.augment_basic, basic_start=FLAGS.basic_start, augment_texture=FLAGS.augment_texture,
            p_texture=FLAGS.p_texture, augment_geom=FLAGS.augment_geom, p_geom=FLAGS.p_geom,
            artistic_step=FLAGS.artistic_step, artistic_start=FLAGS.artistic_start, output_dir=FLAGS.output_dir,
            save_model_path=save_model_path, save_sample_path=save_sample_path, save_log_path=save_log_path,
            test_model_path=FLAGS.test_model_path, pre_train_path=os.path.join(save_model_path, FLAGS.pre_train_model_name),
            load_pretrain=FLAGS.load_pretrain, img_path=FLAGS.img_path, test_data=FLAGS.test_data,
            valid_data=FLAGS.valid_data, valid_size=FLAGS.valid_size, log_valid_every=FLAGS.log_valid_every,
            train_crop_dir=FLAGS.train_crop_dir, img_dir_ns=FLAGS.img_dir_ns, print_every=FLAGS.print_every,
            save_every=FLAGS.save_every, sample_every=FLAGS.sample_every, sample_grid=FLAGS.sample_grid,
            sample_to_log=FLAGS.sample_to_log, debug_data_size=FLAGS.debug_data_size, debug=FLAGS.debug,
            use_epoch_data=FLAGS.use_epoch_data, epoch_data_dir=FLAGS.epoch_data_dir)

        model.train()


if __name__ == '__main__':
    tf.app.run()

CHANGED
@@ -1,99 +1,99 @@
|
(whitespace-only change; both sides of the diff contain the same 99 lines, shown once below)

import tensorflow as tf
import numpy as np


def conv_relu_pool(input, conv_ker, conv_filters, conv_stride=1, conv_padding='SAME',
                   conv_ker_init=tf.random_normal_initializer(0.01), conv_bias_init=tf.zeros_initializer(),
                   pool_size=2, pool_stride=2, pool_padding='same', var_scope='layer', reuse=None):

    with tf.variable_scope(var_scope):
        conv = tf.layers.conv2d(input, filters=conv_filters, kernel_size=[conv_ker, conv_ker],
                                strides=conv_stride, padding=conv_padding, bias_initializer=conv_bias_init,
                                kernel_initializer=conv_ker_init, name='conv', reuse=reuse)
        relu = tf.nn.relu(conv, name='relu')
        out = tf.layers.max_pooling2d(relu, pool_size=(pool_size, pool_size),
                                      strides=(pool_stride, pool_stride), padding=pool_padding, name='pool')
        return out


def conv_relu(input, conv_ker, conv_filters, conv_stride=1, conv_dilation=1, conv_padding='SAME',
              conv_ker_init=tf.random_normal_initializer(0.01), conv_bias_init=tf.zeros_initializer(),
              var_scope='layer', reuse=None):

    with tf.variable_scope(var_scope):
        conv = tf.layers.conv2d(input, filters=conv_filters, kernel_size=[conv_ker, conv_ker],
                                strides=conv_stride, dilation_rate=conv_dilation, padding=conv_padding,
                                bias_initializer=conv_bias_init, kernel_initializer=conv_ker_init, name='conv',
                                reuse=reuse)
        out = tf.nn.relu(conv, name='relu')
        return out


def conv(input, conv_ker, conv_filters, conv_stride=1, conv_dilation=1, conv_padding='SAME',
         conv_ker_init=tf.random_normal_initializer(0.01), conv_bias_init=tf.zeros_initializer(),
         var_scope='layer', reuse=None):

    with tf.variable_scope(var_scope):
        out = tf.layers.conv2d(input, filters=conv_filters, kernel_size=[conv_ker, conv_ker],
                               strides=conv_stride, dilation_rate=conv_dilation, padding=conv_padding,
                               bias_initializer=conv_bias_init, kernel_initializer=conv_ker_init, name='conv',
                               reuse=reuse)
        return out


def deconv(input, conv_ker, conv_filters, conv_stride=1, conv_padding='SAME',
           conv_ker_init=tf.random_normal_initializer(0.01), conv_bias_init=tf.zeros_initializer(),
           var_scope='layer', reuse=None):

    with tf.variable_scope(var_scope):
        out = tf.layers.conv2d_transpose(input, filters=conv_filters, kernel_size=[conv_ker, conv_ker],
                                         strides=conv_stride, padding=conv_padding, bias_initializer=conv_bias_init,
                                         kernel_initializer=conv_ker_init, name='deconv', reuse=reuse)
        return out


def deconv2d_bilinear_upsampling_initializer(shape):
    """Returns the initializer that can be passed to DeConv2dLayer for initializing the
    weights in correspondence to channel-wise bilinear up-sampling.
    Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)

    Parameters
    ----------
    shape : tuple of int
        The shape of the filters, [height, width, output_channels, in_channels].
        It must match the shape passed to DeConv2dLayer.

    Returns
    -------
    ``tf.constant_initializer``
        A constant initializer with weights set to correspond to per-channel bilinear upsampling
        when passed as W_init in DeConv2dLayer.

    from: tensorlayer
    https://github.com/tensorlayer/tensorlayer/blob/c7a1a4924219244c71048709ca729aca0c34c453/tensorlayer/layers/convolution.py
    """
    if shape[0] != shape[1]:
        raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
    if shape[3] < shape[2]:
        raise Exception('deconv2d_bilinear_upsampling_initializer behaviour is not defined for '
                        'num_in_channels < num_out_channels')

    filter_size = shape[0]
    num_out_channels = shape[2]
    num_in_channels = shape[3]

    # Create bilinear filter kernel as numpy array
    bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)
    scale_factor = (filter_size + 1) // 2
    if filter_size % 2 == 1:
        center = scale_factor - 1
    else:
        center = scale_factor - 0.5
    for x in range(filter_size):
        for y in range(filter_size):
            bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * \
                                    (1 - abs(y - center) / scale_factor)
    weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels))
    for i in range(num_out_channels):
        weights[:, :, i, i] = bilinear_kernel

    # assign numpy array to constant_initializer and pass to get_variable
    bilinear_weights_init = tf.constant_initializer(value=weights, dtype=tf.float32)
    return bilinear_weights_init

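A short usage sketch for these helpers (TF 1.x layers API; the 68-channel heatmap shapes and the thirdparty.face_of_art import path are assumptions for illustration): a stride-2 deconv whose kernel starts as an exact bilinear filter, so the layer initially performs plain 2x bilinear upsampling and can then be fine-tuned. For filter_size 4 the 1-D kernel profile works out to [0.25, 0.75, 0.75, 0.25].

import tensorflow as tf
from thirdparty.face_of_art.ops import deconv, deconv2d_bilinear_upsampling_initializer

x = tf.placeholder(tf.float32, [None, 64, 64, 68])  # e.g. one heatmap per landmark
bilinear_init = deconv2d_bilinear_upsampling_initializer((4, 4, 68, 68))
up = deconv(x, conv_ker=4, conv_filters=68, conv_stride=2,
            conv_ker_init=bilinear_init, var_scope='upsample_2x')
# 'SAME' padding with stride 2 doubles the spatial dims: up has shape [None, 128, 128, 68]
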
MakeItTalk/thirdparty/face_of_art/pdm_clm_functions.py
CHANGED
@@ -1,203 +1,203 @@
(whitespace-only change; both sides of the diff contain the same 203 lines, shown once below)

from thirdparty.face_of_art.logging_functions import *
import os
import numpy as np
from menpo.shape import PointCloud
from menpofit.clm import GradientDescentCLMFitter
import pickle
import math

jaw_line_inds = np.arange(0, 17)
nose_inds = np.arange(27, 36)
left_eye_inds = np.arange(36, 42)
right_eye_inds = np.arange(42, 48)
left_brow_inds = np.arange(17, 22)
right_brow_inds = np.arange(22, 27)
mouth_inds = np.arange(48, 68)


def sigmoid(x, rate, offset):
    return 1 / (1 + math.exp(-rate * (x - offset)))


def calculate_evidence(patch_responses, rate=0.25, offset=20):
    # from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment

    rspmapShape = patch_responses[0, 0, ...].shape
    n_points = patch_responses.shape[0]

    y_weight = [np.sum(patch_responses[i, 0, ...], axis=1) for i in range(n_points)]
    x_weight = [np.sum(patch_responses[i, 0, ...], axis=0) for i in range(n_points)]

    # y_weight /= y_weight.sum()
    # x_weight /= x_weight.sum()

    y_coordinate = range(0, rspmapShape[0])
    x_coordinate = range(0, rspmapShape[1])

    varList = [(np.abs(
        np.average((y_coordinate - np.average(y_coordinate, weights=y_weight[i])) ** 2, weights=y_weight[i])),
                np.abs(np.average((x_coordinate - np.average(x_coordinate, weights=x_weight[i])) ** 2,
                                  weights=x_weight[i])))
               for i in range(n_points)]

    # patch_responses[patch_responses<0.001] = 0
    prpList = [
        (np.sum(patch_responses[i, 0, ...], axis=(-1, -2)), np.sum(patch_responses[i, 0, ...], axis=(-1, -2)))
        for i in range(n_points)]

    var = np.array(varList).flatten()
    var[var == 0] = np.finfo(float).eps
    var = np.sqrt(var)
    var = 1 / var

    weight = np.array(prpList).flatten()
    weight *= var

    # offset = np.average(weight) - 20
    weight = [sigmoid(i, rate, offset) for i in weight]

    weight = np.array(weight)

    return weight


def get_patches_around_landmarks(heat_maps, menpo_shape, patch_size=(30, 30), image_shape=256):
    # from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment

    padH = int(image_shape / 2)
    padW = int(image_shape / 2)

    rps_zeros = np.zeros((1, 2 * image_shape, 2 * image_shape, menpo_shape.n_points))
    rps_zeros[0, padH:padH + image_shape, padW:padW + image_shape, :] = heat_maps

    rOffset = np.floor(patch_size[0] / 2).astype(int)
    lOffset = patch_size[0] - rOffset

    rspList = [rps_zeros[0, y - rOffset:y + lOffset, x - rOffset:x + lOffset, i] for i in range(menpo_shape.n_points)
               for y in [np.around(menpo_shape.points[i][0] + 1 + padH).astype(int)]
               for x in [np.around(menpo_shape.points[i][1] + 1 + padW).astype(int)]]
    patches = np.array(rspList)[:, None, :, :]
    return patches


def pdm_correct(init_shape, pdm_model, part_inds=None):
    """ correct landmarks using pdm (point distribution model)"""
    pdm_model.set_target(PointCloud(init_shape))
    if part_inds is None:
        return pdm_model.target.points
    else:
        return pdm_model.target.points[part_inds]


def weighted_pdm_transform(input_pdm_model, patches, shape, inirho=20):
    # from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
    weight = calculate_evidence(patches, rate=0.5, offset=10).reshape((1, -1))
    pdm_model = input_pdm_model.copy()

    # write project_weight
    ini_rho2_inv_prior = np.hstack((np.zeros((4,)), inirho / pdm_model.model.eigenvalues))
    J = np.rollaxis(pdm_model.d_dp(None), -1, 1)
    J = J.reshape((-1, J.shape[-1]))

    initial_shape_mean = shape.points.ravel() - pdm_model.model._mean
    iniJe = - J.T.dot(initial_shape_mean * weight[0])
    iniJWJ = J.T.dot(np.diag(weight[0]).dot(J))
    inv_JJ = np.linalg.inv(iniJWJ + np.diag(ini_rho2_inv_prior))
    initial_p = -inv_JJ.dot(iniJe)

    # Update pdm
    pdm_model._from_vector_inplace(initial_p)
    return pdm_model.target.points


def w_pdm_correct(init_shape, patches, pdm_model, part_inds=None):
    """ correct landmarks using weighted pdm"""

    points = weighted_pdm_transform(input_pdm_model=pdm_model, patches=patches, shape=PointCloud(init_shape))

    if (part_inds is not None and pdm_model.n_points < 68) or part_inds is None:
        return points
    else:
        return points[part_inds]


def feature_based_pdm_corr(lms_init, models_dir, train_type='basic', patches=None):
    """ correct landmarks using part-based pdm"""

    jaw_line_inds = np.arange(0, 17)
    nose_inds = np.arange(27, 36)
    left_eye_inds = np.arange(36, 42)
    right_eye_inds = np.arange(42, 48)
    left_brow_inds = np.arange(17, 22)
    right_brow_inds = np.arange(22, 27)
    mouth_inds = np.arange(48, 68)

    '''
    selected number of PCs:
    jaw: 7
    eye: 3
    brow: 2
    nose: 5
    mouth: 7
    '''

    new_lms = np.zeros((68, 2))

    parts = ['l_brow', 'r_brow', 'l_eye', 'r_eye', 'mouth', 'nose', 'jaw']
    part_inds_opt = [left_brow_inds, right_brow_inds, left_eye_inds, right_eye_inds, mouth_inds, nose_inds,
                     jaw_line_inds]
    pc_opt = [2, 2, 3, 3, 7, 5, 7]

    for i, part in enumerate(parts):
        part_inds = part_inds_opt[i]
        pc = pc_opt[i]
        temp_model = os.path.join(models_dir, train_type + '_' + part + '_' + str(pc))
        filehandler = open(temp_model, "rb")
        try:
            pdm_temp = pickle.load(filehandler)
        except UnicodeDecodeError:
            pdm_temp = pickle.load(filehandler, fix_imports=True, encoding="latin1")
        filehandler.close()

        if patches is None:
            part_lms_pdm = pdm_correct(lms_init[part_inds], pdm_temp)
        else:
            part_lms_pdm = w_pdm_correct(
                init_shape=lms_init[part_inds], patches=patches, pdm_model=pdm_temp, part_inds=part_inds)

        new_lms[part_inds] = part_lms_pdm
    return new_lms


def clm_correct(clm_model_path, image, map, lms_init):
    """ tune landmarks using clm (constrained local model)"""

    filehandler = open(os.path.join(clm_model_path), "rb")
    try:
        part_model = pickle.load(filehandler)
    except UnicodeDecodeError:
        part_model = pickle.load(filehandler, fix_imports=True, encoding="latin1")
    filehandler.close()

    # from ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
    part_model.opt = dict()
    part_model.opt['numIter'] = 5
    part_model.opt['kernel_covariance'] = 10
    part_model.opt['sigOffset'] = 25
    part_model.opt['sigRate'] = 0.25
    part_model.opt['pdm_rho'] = 20
    part_model.opt['verbose'] = False
    part_model.opt['rho2'] = 20
    part_model.opt['ablation'] = (True, True)
    part_model.opt['ratio1'] = 0.12
    part_model.opt['ratio2'] = 0.08
    part_model.opt['smooth'] = True

    fitter = GradientDescentCLMFitter(part_model, n_shape=30)

    image.rspmap_data = np.swapaxes(np.swapaxes(map, 1, 3), 2, 3)

    fr = fitter.fit_from_shape(image=image, initial_shape=PointCloud(lms_init), gt_shape=PointCloud(lms_init))
    w_pdm_clm = fr.final_shape.points

    return w_pdm_clm

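A minimal sketch of how these correction stages chain together at test time; the model directory names, the random lms_init, and the commented image/heat_maps inputs are hypothetical placeholders, not paths confirmed by this diff. Part models are looked up as train_type + '_' + part + '_' + str(pc), exactly as feature_based_pdm_corr does.

import numpy as np
from thirdparty.face_of_art.pdm_clm_functions import feature_based_pdm_corr, clm_correct

# (68, 2) initial landmark estimate decoded from the network's heatmaps
lms_init = np.random.rand(68, 2) * 256

# stage 1: snap each facial part onto its point distribution model
pdm_lms = feature_based_pdm_corr(lms_init, models_dir='pdm_models/', train_type='basic')

# stage 2 (optional): CLM refinement driven by the heatmap responses;
# `image` is a menpo Image and `heat_maps` its 4-D response maps
# final_lms = clm_correct('clm_models/basic_all', image, heat_maps, pdm_lms)
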
MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_all
CHANGED
The diff for this file is too large to render.
See raw diff
MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_jaw
CHANGED
@@ -1,490 +1,490 @@
(Binary file: a pickled menpofit CLM model for the 17-point jaw contour - an OrthoPDM shape model with FcnFilterExpertEnsemble experts, patch shape (8, 8), scales [1], diagonal 200, and the same fitting options set in pdm_clm_functions.clm_correct. The raw serialized pickle bytes are not human-readable and are not rendered here.)
|
400 |
-
p166
|
401 |
-
tp167
|
402 |
-
bsS'_trimmed_eigenvalues'
|
403 |
-
p168
|
404 |
-
g64
|
405 |
-
(g65
|
406 |
-
(I0
|
407 |
-
tp169
|
408 |
-
g67
|
409 |
-
tp170
|
410 |
-
Rp171
|
411 |
-
(I1
|
412 |
-
(I1
|
413 |
-
tp172
|
414 |
-
g74
|
415 |
-
I00
|
416 |
-
S'\x93Ex\xe8/e\xd3?'
|
417 |
-
p173
|
418 |
-
tp174
|
419 |
-
bsg63
|
420 |
-
g64
|
421 |
-
(g65
|
422 |
-
(I0
|
423 |
-
tp175
|
424 |
-
g67
|
425 |
-
tp176
|
426 |
-
Rp177
|
427 |
-
(I1
|
428 |
-
(I30
|
429 |
-
I34
|
430 |
-
tp178
|
431 |
-
g74
|
432 |
-
I00
|
433 |
-
S'\xb8\x86\xd5\xb9\x9c$\xd6\xbfZw\x81\x02\xe6e\xb5\xbf\xda\x05\xcb\xf8\xe3\xe9\xce\xbfWT\x91\x1f@\xa4\xbb\xbf\x12\x14\x83t\xd6\n\xc1\xbf\xcb\x01\xbb\x95\xc8\xe6\xc2\xbf\x84\xa6\\\x9a\xe2\x12\xa1\xbfH\x18D\xfcdD\xc7\xbf\x16\xc7U\x1c\xa4B\xa1?\xf9\x18\xb9\x05\x18\xe4\xc0\xbf)d\xabv<?\xb0?\xe5^\xf2\xe9\x03\xb3\x80\xbf=<\xfa\xce5\xd8\xae?\xbf\xd5@\xdcI\xac\xc3?\x10\xdfK\xce\x08\xcd\x9a?\x17\xa7@\n\xa4\xe9\xd3?\xffm\x96\x19&-\x85?\xe7\x9f0\xe8\xaf\xc5\xd7?\x99e\xc5\xc0\xf5\xaf\x81\xbf\xd4\x81\x90:q/\xd4?\xd8\xf2\xad\xb6\x9d\x02\xac\xbf\x16Ct\t\x8e\xa5\xc5?%843V\xd2\xb2\xbfeWr\x9f\x99\xbb\x90?\xcd_g$\xd3\x8a\xab\xbfF\xd5\xf6\x88VB\xbd\xbf\x90\xd0e\xa4z\xd6\x90?\xe5\x96\x07\xf0\xe4\x16\xc7\xbfV2\'[\xa8\r\xc0?\xdfo{\x84\x7f\x86\xc4\xbfJ$\xb1n\x0fO\xcf?c+\xdc\xcd`\xff\xbe\xbf\xda8\x05\xf2\xf1O\xd7?\xf5^\xb3\xd8\x18\x92\xb9\xbfU\xbc\xc7\xf0`{\xd0?,\x948\xf0\x88`\xc5\xbf/\x01\xbc\xe0\x84\xfc\xca?\xa8\x07X_\x81\x9b\xc2\xbf\x8d\xd7(\xcf\xe6\r\xc3? \xa4\xa4\x17\x9a4\xc3\xbf\xfaB\xb2\xd5\r9\xb8?\xc72\xd1\x18]\xd2\xc4\xbf\x0b\x96^\xbd9u\x9f?|\xae\x12x\x91*\xc6\xbf\xabx\xdd\xc8\xc5#\xaf\xbfy\x17\x9e\xce}\xe3\xc2\xbf\xc4\x93[t\xf6t\xc8\xbf\xd3\x1b:I\xde\xe6\xb2\xbf\x17\xdc\xd2\xde\x9b\xcd\xd3\xbfPw~\x85x\xde\x90\xbf<e6Pd?\xd6\xbf\x0e\x1f\x95,C\x92z?=\xf0U\xc4\xf4\xde\xd4\xbf\xcd\xbe\x9b\x83*\xa7\x9d?\xfcI\rK\xa7\x94\xca\xbf%\x1d\xfe\r\xb5!\xb4?\x1d\x0bx\xd7_\x7f\xb0\xbf\xa1Gw\x01\xed\xc8\xc1?\xbfw\xde/\x8a\x92\xa6?\xd3\x1e\x01*\xbdq\xc4?VH\xb5\xc2d\xf1\xbb?\xba,\xba\x87\xc3\xa7\xc3?g7\x8b)\x88\x88\xc4?}678\x11\xe3\xc2?\x83\x81i\xe7\xean\xca?\xfa2\xa0=\x11\x98\xc3?\xf1c\xdaQ\x05I\xcf?~\xe9"\xe2l\xa8\xc5?>\x98C\xe3\xf7\xe7\xc8?\x81;8\x89\xe2C\xcd?\xe4Pm@\xac\x91\xad?\xe9J;\xa4\t\x9e\xc8?\x92\xc1\x1e\x8c\x08\xeb\xb1\xbf\x93\xfe^83[\xbe?^\xf5\x13\xc0\xf5\xa1\xc1\xbfK\xb7o(\xaf\xf3\x88\xbf\xfd{\xc4\xcf\x10c\xc0\xbfg\x07}t\xe3\xed\xc8\xbf\xd8;N\xe3\x18R\xb2\xbf\x87Q\xe6\xdc\xc37\xd5\xbf\x10C2#\x00\x16P\xbf\x9b\x03n\xfd#\xb0\xd5\xbf\xa9\xfaM\xc9\x84\xe3\xb2?\xfd\x0f\xa3\x8c>\x15\xcb\xbf>@]\n\x9b#\xbd?X\xe1\n\xc0U\xc4r\xbf\x17!\xb0\x9d\xc7Q\xbb?\xb7g\x18Jc\x87\xca?P\x05\xab\x94\xd3\xc4\xab?1rV\x81\x9e%\xd5?\r\x82\xb8\xb3\x14\xd0\x9f\xbfr\x8d,\xed\x1d\x95\xd4?R@4\xe0\xd7\xe5\xbc\xbfA\xbe\xb2,\xf0\xe6\xc9?9\xc8q\xb72\xd5\xc2\xbf\x89\xd4\xaf\x8e\x13T\x96?\xd2\xa2\xe9BoR\xb7\xbf\xd4\x1b\x19\x8e\xd1\x0c\xbc\xbf\x04\xf6Z\xa7\x03\xbd\x99?.bx\xd2\xf6\xa8\xc7\xbf\x16d*!p\r\xc5?N!u\x9f%\x16\xce\xbf\x0eq\x8a\xbd\x8aI\xc9\xbf\x80\xa9\x92Z\x91{\xd0?H\xb0\xd8\x7f\xe9D\xb9\xbfQ({\n)P\xc2?^\xde\xea\xba6\x14\xb1?!\xbf\xf4\x19\xa0?\xaa?\xc7\xc9\xc5F\xa5G\xca?u1\x91\xf7\xa6\xfd\x92\xbf\x88\xf2\x91*\xce\x17\xd2?)\\s8\x95g\xa4\xbf\xea\xd3\xa3 V\xe2\xd3?\xb3\x83\x94\xe6\xf2 \xb0\xbf\x15\xef\x95\x85\xa5\xe2\xd1?\xa6:+y\xc7\xa3\xbd\xbf\xa0\x94b\xda\xd8\x0b\xbf?\x91O\xc3T\xf3u\xc3\xbf\x94\x0ee0pGd\xbf:u\xce^:\xb5\xc4\xbf\x90\xa3\xf0\xc7Qh\xbf\xbfFEv\xad\xf1\x13\xc2\xbf\xb7q\x8en\xae\xde\xd0\xbfY\x05\xf3\x92%l\xbc\xbf\xc8\x80q\xf2\xee\n\xd4\xbf5\x9e`\xd2G\x82\xa7\xbf\xb8\xd3\x11R]\xc1\xd2\xbfJ\x82\x17\xc6\xc7\xf4\x88\xbf>\x99\x94z\xc0\x95\xcb\xbfr<\xa44\x89]\x8c?\x89\xb3.\x9e"\xbf\xb3\xbf\xe6\xae\xd3\x81f\xe0\xb1?3J\xffzr\x08\xb6?\xa9\x9f\x82\xbd^!\xc0?:\x9b|a\x13\xa7\xcd?\xcc\xa2\x95\xa0i\x92\xc9?\x05\xe4o\x8b\x0b\xf2\xd5?\x0f\xf5\xee0\x12\x7f\xa3\xbfO\x1f\x85Z`\xf3\xc3?\xef\xe95C\xaa\x90\xb5\xbf\xa5e\xabDd:\x9d\xbfLr1=\x9f\xc6\xc1\xbf\xbe\xe3]\xf0|\x8c\xc7\xbf\xb3\x15\xfe\x02\xdeP\xbb\xbfw\x8b\xc4\t\x97\xc8\xd1\xbf\xec5OmU\x8c\x88\xbf\xfe\xfd\xeb\x11D 
\xd0\xbf\xe3\x04\xceD\x1cq\xb6?"\x83\xb0E\xaf#\xb6\xbf2\xa9r\xc8\xec\x03\xc0?,#\x02\xe3\x93S\xcb?\x12\x16\x8a\x97\x9eD\xba?j\xfb\xcdd\xcf\xfb\xd4?\x05\xad<K\x93b\xa8?|3t\xf3\xc4\xbb\xcd?\xddp\xaf7\x88U\xa8\xbf3\xc1\xe3sLb\xb0\xbf:8\xbe\xa3\xab\xb1\xc3\xbf\x0c\x88\xce#\xfd\x95\xce\xbf!\xf5q\xce\x84\xa5\xc7\xbf\x9b\xa8\x94G\x12\xa6\xd1\xbfH\x9b\xc5\x0c\x85\x9c\xb8\xbf\xd7\x10:\xd2\'&\xc7\xbfd\x1e\xe6y\xff\xab\xa9?\x86\x17\xb0\xee\x8f\xb3\xab\xbf?\xac\x99\xabz\xa4\xc4?\xa4\xab\x99\x83\x13\xa2\xbb?\x02g\xc2X$\xf5\xc3?l5\\\x1d\x19\xc7\xd0?:}\x97]\xb4\xde\xc0?\xb3\x05\xb0k\xb6\xa8\xc1?\xb6e\x07\xcf1\xce\xe0?\xaf\xfe\x9e[\xd5\r\x9f?\x91\x96y\xa5\x10\x8e\xcd?x\xbbq\x87\x85\xc5\x8b?#\xca\xcd\x10\xccW\xbb\xbf\xd05*Z\x1e\x12\xac?V\xbd \x16\x1f\xe4\xd6\xbf\xb8\xf7c \x04\x81\xab?\xae\x10\x9e]\xfc?\xd7\xbf\x90q\xc4\x02\xddA\x9e?%\x0f\xc4v\x19V\xc9\xbf\xa1\xben\xb2\x81\xb3\x9e\xbf\xaed}J\x17\xec\xb3?\x91\xc7\x05O\xeep\xb0\xbf\x13\xc5\xbaY/\xa0\xd1?\xc7\x0c\xeey\x1d\xf4\xb0\xbf6\x05\x7f\xb2\x9d\xa9\xca?\xf8\x87\xb7\xeb\xa1\xcd\x90\xbf\x93\xacH\xf2/5\x93?\x07\\\xd4\xd9L!\xae?D"\x8fB\xe7\x8d\xc4\xbf\x91Q\x95\x01=\xa4\xb9?\x05&\x94,XH\xca\xbfL\xf0*>\xb4\xad\xb7?\xd5y\x9e\xb9\xb1\x03\xbe\xbf-4n\xf5\x00\x0f\xa5?[\x1a\xaa\x95\xed\xec\x9b?\xb2+X\xa8\xc5\x17\xb0\xbf\x92\rWI\x1f\xc1\xbc?r\xfc\x142\xec>\xc5\xbf\x11\x10\xa5\xa8\x8fc\xb0?\x83\x1a\xc8c`{\xca\xbf\x11\x0b^!c\xd9\x9f\xbf\xf2\xadv\xcbh\xd0\xa5?/\xdc\xc5\xb7r\xdb\xc4\xbfl{\x00\x1b1\xf8\xc0?\xbcv\\7v\xc0\xb0?\xa1(S1\xdf\xd3\xb6?\x05\xeb\x90e\x8fS\xcd?\xd2\xdc\x81\xeb\xf5D\x97?\x0c\xde\xc8\xee6\xc9\xc4?\xb3\xd8\xa1\xae\x86 \x93\xbf\xc5 \x18\x86\\<\xad\xbf\x1d\x9en\xc8D\xda\x9b\xbf\xef\xe5\tyq\xa5\xce\xbfNO\xd8\x0f\x99\x12\x96\xbf\xec\x18q\x1e\xf7w\xcc\xbf\x93\x8b@\x18W\xd2\x9b\xbf[\x12\x96\xb4\xf2q\x88\xbf\x81\xe8bHE\x1b\x98\xbfQ\xbbL\n\x00\xe8\xc8?3\x84\x18\'=F\x92?\xf7\xd9.S\xea\t\xcc?\xc2\x8a:#\x1f\x02\xb4?\xeb3\xd5\xc4\xb2\x0c\xb8?4}\x9c\xba\xf9\x83\xa6?\x8e\xe0N\xaf\xadw\xbd\xbf`%\x84\xaa\xfa\xb1\xa8\xbfi\x00\xe4\x06ff\xd4\xbfY\x98`\xef[\xb8\xb2\xbfk\xed\xc43T\xf7\xd8\xbfV\x1e=\xce\xfc\xcd\x9c\xbf]\xa9rq\xfc\xb0\xc6\xbf\xf6ZO\xaell\x9f\xbf\xf8\xc9v\xcf\x02\x82\xc9?\x15\x00KZUz\xc0\xbf\xff\xce\xc5V\x04\x00\xe1?c\xe3a\x8e@y\xcc?\x9d\x96`\x18\'J\xb2\xbf\t$U\xaag\xe6Y\xbf\x07\xd7\xab\xb7\x0fA\x8c\xbf\xce\xe3\xfe*\xbf\xd5\xcc\xbf\xea\x06[\xdcA\xe8\xb2?\xbaP\xee\xa7\xa0\xe3\xd3\xbf\xf9\xc4\xaal\x84\x94\xbf?\x18\xca\xc7\xe2\xa3\x96\xc3\xbf\xa0\xa3z9%\x9c\x91\xbfh\xd9\x8e\x02\xf4\xc6\xc1?W\xfd\xe2\x9d\x9e\xc5\xbd\xbf\x9a6\x1b)\xd0[\xd9?-\x93\xbe\xe4\x0c\x91\xb7\xbfU\rK\x9aKh\xd4?\xdd>\xa3\x89\xaf\x0b\xb6?\xc0jd\x14\x06\xabi?\xdf^\xd6Rc\x1b\xbb?4\x95\xcb\x0b{]\xd3\xbf\xef%\xc8\x1c&a\x9d?\xe1\xe1\x8a-\xb7\x18\xd6\xbf\xcf\xb4\xd1\x16\x95*\xac\xbfX\x02?B\xf3\xaf\xc3\xbf\tDat\x19\x8f\xaa\xbf\xd1\xab(l{.\xc2?\x83\xeaZ\xc35:z\xbfOp\xdf\x921 \xd2?\xc3\xe4\xeedm\xb7\xa4?\xc0.y\xe5\xa7E\xca?Bq\xba\xcb\x87\xbb\x96?>\xc0\x01N\x16\'9\xbf\x180q\x80\xd9^\x8c\xbf\x81\xa6d:B\xd3\xcb\xbf\x89\xcb\xc7?\xcf\xd4\xa5\xbf\xd4\xdfaI\xe0y\xce\xbf\xc5B\xb8\xfb\xea\xa4\xb8?\x8f\xdaq\x9f\x83^\xa0?C\xa0\xeaE]\x00\xa3?\xdf1\xbe\x94^\xd9\xcf?\x8co\x01-\xa1s\x9e\xbf]o\x159\xf9\x7f\xd0?\x1c9\xe6Cal\xa4\xbf&\xf9\x0b\x08z\x9e\x95?\xbf\xec\xf0 7\xf2\xa4? \x94\xba\x9b\x8b\x18\xce\xbfd8\xe8\x99\xa1)\x8f?\x92T\xe6\x00\x99\xfa\xd1\xbf\xe8\x8dQ#o\xfb\xc1\xbf\xc0<\xa3\x02q\xfe\xb3?\x98\xdd\xe9w\xf6\x80\xc9\xbf\x84\x1c\r4\x05\xa9\xd1?\x9eA\x04\x9b}J\xa4?/\xee\xb3+\x00\xb8\xb8?\xd6*\xf4\xe7\x05_\xce? 
\x83\x8d\x0f\x1f\xbe\xd1\xbfHm[\x17\xee\xbf\xb6?\xf5v\xfc~\x858\xd3\xbf\xc6\x86\x10\xfe8\xcb\xb7\xbf\xed\x9cK\x89S\xf6^\xbf\x98\xbe\x00\x94\\\x1a\xb6\xbf\xc1\xf8\x94\xa9C\x95\xd2?\xdf\x91\nS\xa8\x98\xac?Y\x82\xe4B-\x92\xd1?\xea\'#\x99\x96\xe6\xb9?\x90\x81\x9b\xf7\xb5\x97\xa0?spq\xa6\xf3\xa3E\xbf\x80\x81k\xed\xff\x89\xd1\xbfA\xed\xc8<\xcb|\xbf\xbfQ\x8f\xf3\x92K#\xce\xbf\x8f\x85\x8f\xc6V\xf7\xd3\xbfJ\x91]|i\xcf\xad?\xe1\xa9(\xd2\xaf\xe0\xb9?pa$\xf6\xc6G\xc4?\x02\x1a\xcbS\xbc\xb6\xd8?\xf7\x82\x91]\x0f\x02\xb0?\xb0\x9c\xefC\xad\xf0\xcc?\xe1\x91\x812\xad8\x89?F\xb9F\x02\xca[\xc8\xbf\x01\xf2F\x9f\xbc\'\xb0\xbff\xd9\xba\x00\x07\xd9\xd5\xbf\xae\xa1z\x12"3\xc0\xbf\xe6,gn\xbf|\xae\xbfz\x871\xcam\xeb\xa7?\xbc\xbc\xd9\xeehZ\xd2?}\xd5\x9eO\x17u\xb1?\xec\n\xa7\xa5\xe8\xc0\xc8?\xde\x86\xb1iu\x04\xac?\xc8\xb6\xed\x89\xad\x89\xc5\xbf\x9fl\xf0\xdcd\x06\xa6\xbf\xcb{P\'o\t\xd4\xbf\xb8&W\x15\x8e5\x88\xbfoL\x1c\x82C6\xb9\xbfoJ\x150\xd1#\xa8?\xa1\x8e\xbeX\x96\x90\xc7?\x90\x84~\rS2\xac\xbf5\xf0\xa2\n}y\xcf?>\x8b\x8c91~\xb7\xbf\xb7K\xb6\x1a\xe9+\xb7?\x1d\xc3\xa5\xf3\xc6\xd4Q\xbfla\x9f\x01\x87@\xb3\xbf \x91l\xc7\xd0\x9d\xbe?\x13\x87\xcf\xc1\x99\xf0\xc3\xbf\xb4Z\xfd\x94"\x0f\xa2?\xdaI9\xbd\x1b\x19]\xbfb)4\xc8\x9f)\xae\xbf=\xcd\\\x03\xdf\xc3\xc0\xbfv-V\x10\t\xb8\xa1\xbf\xa7\x84\x90=\xef\xa0\xaa\xbf\x9e\x9b$\xa5k\xc2\x81\xbf$\x8fG\x91\x90\xd2\xc6?C\x8b6\xad{i\x92?\x98"TIM\xa9\xca?\xd6-\xe2K\xdb\xbe\xb3?\x17\xc3\x8c&\x19\xe1\xa2\xbfM\xcb\x0e\xe0/x\xb9?\xe9";,\x9c\x8a\xd1\xbf\xbe\x9a\xeak8\xd9\xb9\xbf%Gl\x0e\x14\x7f\xc5\xbf\xd9\x93{N\x19i\xc2\xbf\x859\\\x8d,[\xc4?t\xb7\x10Y\xe6d\xaf\xbfY]\xbbB\xcb\x0e\xd3?X@a\x8fta\xc5?\xff\x80\xfc\xc6\x9a\xe6\x83?1\xc4GH+\x1f\xb8?\x8b\xee\xe1Q\xc5\xf7\xd4\xbfCUq\x8b*x\x9d\xbf\xfd\xa8\xc4\xce\xff\xb2\xd1\xbfQ\x1f\x0bL-\xcc\xaa\xbf\xab\xc69\x9fG`\xc4?\x1aT\xdb\x0f\xc8\xcb\xbb\xbf\x08\xa9Y\xca\xd4\xe9\xdb?\x03\x89\xb1\x86\x01\xd4\xb5\xbf\xb3\xf3\x98c\xbas\xc3?x\x88}\x96\xed\x97\xc8?I\x16\xab\x1a&I\xd5\xbf8\xf3\xf3s\x00?\xc6\xbfy\xe1+\xfc\x01D\xc4?\x97\x95N\xc8\xfe\x97\xba?\xad[\xd9\x04\x043\xac\xbf\x07"\x7fi;\xa3\xcf?\xbdl<\xe5\xdee\xc9\xbf\x1c`c\'\xb1w\xac?\x80`\xf5>\xbb\xfb\xa9\xbf\x11c\xc5G\xd6S\xc7\xbf>\x12\xfa\x83Fy\xc4?{g\x91\xa2\xb3F\xcf\xbf\xa4\xdc;\xa3zk\xb5?\xdb\x9eS\xa2\xb6F\xb3?\x8c&\x9f\xce\x12]\xc0\xbf_=\xf0\xcf)\xf5\xd1?+QH\xfdPp\xb6\xbf\xe6t\xdb*\xa8 \xa4?\xaf\x0b\x18b\x12\xf8\xc0?\x1f\xd1\xdbzr\xb2\xd1\xbf\xc9\xa6\xe5#m\xe2\xb6?\xe5/\x95\x10\xb0\xcf\xc7\xbfm\xc7\xdck\x949\xbc\xbf\x1b\x1a\xa6\xf9\xf5\x85\xce?\xc8\xb2\x10\x8a\xf5\x10\xa6\xbf\xb8\xcbv"+.\xd4?*w\xe7\x04*\xbe\xc0?\x80S\x89\x9e"\x7f\x9f\xbf\xe4\xc0Sl\xeb\x87\x93?g\xe6\x8bp\x0c\x0f\xd7\xbf\x0b\x07\x9a\xa1\xfep\xc7\xbf\xa9\xec\xea/N \xc1\xbf\xe1\xa6\x1a\x8b\xe0K\xb2\xbf\x08V\x97w\x1e\xb7\xcd?m\xa5\xccJJ\xb3\xc3?\x83\x05\xc6K0\xc1\xcc\xbfm/\x11\xa9\x01\x05\xc8?CKg\xc80\x1f\xc1?pk\x0f\x1b\xa8\xb0\xc1\xbf\x98A\x7f(\xf7\xa4\xd5?\xe1I2\x06#\xd2\xc7\xbfc\x99\xf4\xf5#\xfd{?\xc8m"\x7f\x0f\xf0\xbb?>\xae\xffYY1\xd4\xbf\x87\x94\x92\'\xb0F\xc2?aeg\x83\xa9K\xc4\xbf\x90\x9d\x15\r\xb9\x14\xc6\xbf\xe9\xd3#x\x96\xac\xd1?S\xb0\xab:\xa3\xea\xbb\xbf\x89\x94+\t\x1a\xb5\xac?|9V\xdd\x8c\xd7\xd1?\xbcK5\x0e5\x1f\xc9\xbf\xfe\x13I\x9c\xf9\xcb\x9b\xbfI\xa2\xd0\x1bcm\xa1\xbf\xcf\x1d\x98\xd4\xb6\x0c\xcf\xbf\x83zh\xd7\r\x0e\xcf?BS\x9e\xfckr\xb3?\x1ff\nq\xb5,\x9c\xbf\x98t\xc9\xe5\xd7\x17\xc5?\x8cSThu\xcf\xcd\xbfq\xae\xf8\xd3)\x9b\xa1\xbf\x12\r\xdc\x98M\x8f\xb1\xbf>\xbb\x9a\x1a_Y\xb7\xbf\xa9\xf0\xfd\xd8\x99`\xcc?l\xfd\x9d5L\x99\xb2?\xd7\xcdK\x12\xc7(\xc1?"\xd2\x91y\x03S\xb3?\xb0V 
3\x1d`\xc3\xbfj\xfe\xef`\x06\'\xbb\xbf\xd81\\\xc5)\x9c\xcc\xbf\xd4\x15h\x88y\xde\xc0\xbf\x18%(X\x8e\x1c\xd0?]c\xd4\xcc\xcdN\xc3?\xfc!\x0e\xc1\x17\xf1\xca?\xeb\xc5|\x1a\x9c\x1d\xc8?\x98\x9e"\x19e\xd6\xc3\xbf\xeaGv\xbc7\x96\xba\xbf*\x17-1\x84v\xca\xbf@\x10\xc4Ur\x12\xd2\xbf5\x9a\xb1\xfa\x00\xe4\x93?\xed\xe6\xaa\xa4\xf9k\xa8?p\x1f\xcd\xcb\xf5\xa2\xc9?\x06\xe5w\xf7\'\x7f\xd6?\xa7\xd8V\xc3u\xec\x99\xbf\n\xc9\xc4?\xa3H\xb8\xbf\x97\x95\'\x080\xd2\xa7\xbfF\x1fL\xb45\xdb\xd3\xbf\x10*\xbb\x8c\n\x93\x80?5\xac6wP9\xac?\x13-\x85\xfc\x00p\xa5\xbf\x00\x1cV\xb7a\xc9\xd0?\x86\xca\xacl\xc2\xf4\xb6\xbf\xce\xd9oE\xa4\xa4\x96?\rx~\x11\xd7\x00\xc3?\xda\xce\xcfW\xbd\xb5\xd0\xbf\xdf\x1aK\xa2\xc5\x14\xc2?\xcb\xc9\'\x8a~\x93\xaf\xbf\xc20\x02\xe4\xa9H\xc6\xbf/\xb7\xe1\xf9\x8a\xfb\xc7?\x8d\x85\xd3\xc1\xf9a\xc6\xbf\xbf\x0f\x07\xc8\xc5\xae\xb5?v\nW,\xbb3\xc4?\xf0yR\x8b\x81\xa9\xba\xbf\xb9!3}\xd2\x00\xb9\xbf\xf24\xe2\x93\xbf\xdc\xbd\xbf\x899\x83\xf6J\x15\xbf?6\xd3&\xa6\x92\x9d\xc4?{,%\x9au\r\xb0?\xec=\x86\t\xdcv\xc2?\x14k\xae\x1b\xfe\xf4\xa8\xbf$*\xddBbz\xc8\xbfr\x0e\xa0\xae\xd3\xef\xbb\xbf\xfd-{C"\xa2\xc5\xbfPr\t\x90\xfde\xa3\xbf\xb8\xa9zQ\x1c\xfc\xca?\x99+\xaf\x91\x19v\xc0?\xac1\xd7\xfb\xa6\x9a\xba?+\xf0O\xf7}b\xb8?\xfa\x84C\xdb\x02\x99\xd0\xbf\xad\xb3i\x9d\x85\xae\xb8\xbf\xf8\xd1vZK\x97\x98?LTNh2M\xc5\xbf\xf3V\xfc\xaaG\x8b\xce?\x99q2`,}\xb6?KR\xb1\x0c\x82}\xb6\xbf\xfd\xde\x93E{\x88\xd1?8\xb4\xd4\xf2E+\xd0\xbf\x957w^\xf4\xba\xc9\xbf\x05\xb5;VH\x86\xc0?4\xad/\xff\xfe\'\xd6\xbfW\xd2N\xdfp\xdc\xca?v\xd6>\x01\xb9b\xd1?OX\xaf&U\xf6\xaa\xbf\xa3\x14!\xfc\xbb\xd7\xd1?\xa9X\tW>\x1c\xbb\xbf\x1f\xd4\xfa\xf6\xb5\xbe\xcb\xbf\x96D\xa0\xd1)\xf2\x8b?\x1c\xb7\x1f\xef\xdd0\xa1\xbf\x10F\xe8\x0e\xa6\x8b\xb6\xbfV\'\xc48\x9b\xe8\xb6?\xf0\x80\x94\xc6\xe1\x8e\xc5?V>,\xc0>_H\xbf\xf3\xe1j\xb2\x81\xc4\x94\xbf\xd1|\x05^<|\x93\xbf\xb4\xe2\xeba\xeb\x9e\xc6\xbf\xd2b6\xd0\x15P\xb4\xbf\xb5\xfaiba\xce\xb2?\x1b\xd9\xea\xaa\xe5\x05\xa1\xbfD9#\xf7#\x1b\xc5?\\}q\xd0}\x96\xcf?\x8e\x0b\xb8\xec~\xb7\xc7\xbf^\x82%\xb0\xaa\xc2\xc3\xbf\xe4\x0e\xa1\x05\'>\xb1\xbf\x83\x96\xfe\x0c{\xd7\xcf\xbf\xb3\xe4YW*\x9e\xc6?\x13\xff\xf0j\xcda\xcc?\x84\xdd~\x83EF\xab?T\x82f\xe0\x05\xcd\xd3?\xd9en\xe2\xe4*\xc4\xbfI\x9dBll\xfc\xda\xbf\x19\xf1B\xc8|\xc5\xae\xbf\x1d\xd1r\xb4\xc9\x04\xae\xbf\x1a\x9a\xdd\xc4\xad$\xba?\xf4w?\xea\x83\xf9\xd4?\xd7 \xb3]\x91\xfe\xc9?O\xda\xdf\xae\x1b\xa2\xb9\xbf\xa6H\xb1n+t\xc0\xbf\x8au\x1f\xd5\x15\xa2\xb9\xbf\xa5\xb8\xcb\xbc\xa8\xc0\xd0\xbf7qO\xc3\xff\xe8\xab?w\x8f\xb5\xa4\xc7\x13\xca?\xd4\x9e;B\x9d\x10\xc6\xbf\x93\x1d\xdc5\x0b\xb8\xc7?\r\xaf\xa4*%\xa5\xd2?m\x89Z\x97H\x1b\xc6\xbf\xf8\xb1Yu\xe5p\xc3?K\x1d\xd1\x94 \xa0\xc4\xbfW\x19\x08Q\rL\xdb\xbft\xf4w\'v`\xc5?\xc9!\xa6\n\xcbt\xa0\xbf\x85`\x9b>\xc6B\x9c?\xb5\xb68e\xf8\xaa\xd8?6\x05\x15\n\x9d\x92\xb4\xbf\xed\xf1i$\x8e\t\xaa\xbf=w\xc4\xd9\x8d\xef\xb7?\xda\x86\xd0\xd3\xcd\xbd\xd5\xbfQ8\xf0\x9d\xb8_\xc4\xbf\xb3\xcc\xe5\xde\xf3_\xc1?\x10\xd0\xba\x80\xf8\xfa\xbd?P\xf1\x0b\xbe\xf9I\xc8?\xc24\xe3\x16\x10\xb0\xc3?\r\xdc\x94\xcd\x0f\xa3\xbd\xbfa\xbaV\x1e\x876\xd1\xbfh)\x9b\xbf8E\xb2\xbf\x99\xc7\xc4\xa3\x9dt\xa7?#\xc1MM\x86\x8f\xc0?x\xef\x0b\xc2\x9c\xfc\xc0?\xb2\x1e\xf0y\x8d\x0e\xbb\xbf\xa2\x9a\xc36\x9a\x8b\xa5\xbfe[I\x9f%\xa7\x8a?\x06\xfa\xf9zY\xfc\xb2\xbfc\xb0OpN\x18\xb8?\xa3$_\xb7he\xa1?\xbb\xe8^l\xb2\x83\xb3\xbf;\x8b<q\n\xa3\\?&F\x1f\x1c\xfag\x9d?\x13ql\x81\xd1z\xb0\xbf\xfb^&L\xd6\xf0\xad\xbf\x9f\xd0\xa2A\'i\xc4?HD\xd4\xc4\x98\xb9p\xbf\xc8\xcb?<\xdc\xb6\xc5\xbfL*\r\xa7r\x92\xb8?\xe0\x15FoJ\xcd\x91?\x84#aI0:\xbd\xbfW\xa8 \x94XL\xcb?Z9\xc5\x86\x99\x8c\xab?\xb1\xe0\xe2\x08.\x98\xd1\xbf\xb1M 
\xef\xb8P\xad?_\x8f\x02\x9e\xea\xdf\xb5?J\xe6\xeb\xf2\xb9\'\xcb\xbfz\x0b\\-|\xc5\xc6?\xb0\x06\xfd\xd7c\xa0\xd2?\xe0\xd3\x13&\x16\xd7\xd1\xbfq\xf1\xe0\xb1!\x14\xc8\xbf\xffXRg\xc1\x9c\xc7?/\xd6U\x17~O\xb5?c\xe2\xae\x16\x9agN\xbf\x17[\xc7_!i\xb2\xbf\x828\xa6$a\xf6\x9e\xbf\x80\xd4\xff.d&\xc3?re\x97=\xba>\xc1\xbfO\xd0\x90\xb0\xee\xa3\xce\xbf\xc5\xab\xe6\x14\x81[\xca?\xa3\x881\xf0\x00\xc6\xd1?6\xfc\xd1\xede\x01\xa7?\xcb\r\x16^\x08Y\xce\xbf\xea\xf8\x99\xc8u\xc8\xd6\xbf/\xe2X\xc3\x07\x88\xb6?\xf0\'\xa3PpB\xcc?\xf8rSe*\xc8\xc2?\n\xca\xbbp\x07\x7f\xc3?N\xf2)\x84\xa7j\xd2\xbf,\x9c\xf0\xa1L\xeb\xd2\xbf\x15u\x88\xca\xf7H\xb7?{\xccU\x8a+|\x97?\xcc/uZ\x898\xc7?\xd9\xa8\xc9\xd4w{\xd1?\xc6\x86\xa8 y\xd7\xbd\xbf\xb6u\xd4\x0f+D\xc0\xbf\xc9M|=4v\xbc\xbf\xdc[f)\xe0\xf7\xcd\xbf\x9f\xb8\x7f\xbd\xbd\xce\xc6?\xc2\x99rl\xa3\xa2\xd2?\xc2\x08Zd7\xee\xb6\xbf\xf9\xec\x12\x08\xa6\xa1\x9e\xbf\xa9\xacJy\x1b\x9f\xb8\xbf\x1b0\x8d2n1\xc8\xbfa\xca\x97\xd2\xccR\xc5?ev\x03\xfciy\xc7?j\xa8\xac\xc5\xc7\x99\xad\xbf\xe0\xd5r5\x90\xcd\xb8?O\x91\x17\x18\xc6\xb4\x94\xbf|Z#\xe5dG\xd1\xbf\x1b\xacz\xc8\xa1\x97\x8f\xbf\xdcpl\xb8\xf7=\xb6?\x88\x03&\x1fZy\xc0?\xbc\x10\xfb\xe0\x9d\x19\xc8?,s\xf2]\x97\x98\xd0\xbf\x03\xd7VL\xaa$\xc5\xbf\xc7\x18\xbc\xecW&\xd1?Y\x00C\xb1\xc8$\xb5\xbf\xe1|^\xf5\x82D\xbb\xbf,\xe1\xfaN!5\xb9?&\x16]\x11a\xde\xbb\xbf\'\xe2ea5\'Q?\x92\x90\x06R\xd6\xe4\xcd?_(x_U\xd6\xa8?q\xf7\xdb\xbc\\\x1c\xb1\xbf%\x02\xa6\xf3\xc09\xa8\xbf\xd3\xda\x93>y\x00\xc7\xbf\x8b\x86kJS\x9e\xb9\xbf\x10R\xf3u\x02\t\xbe?Ek\xcf\x9a\x11f\xc4?L\n\x16)\xb8\xf3\xb4?r\x7f\x9c\x9c\xa2\x86\xa0?V\x01\xe9\xe9`e\xa6\xbf=\xf5\xca\xd2\x00\xbc\xd0\xbf\xb3@(\xa2\xd4L\xa0\xbft\x07v\x9d!!\xd2?\x8c\x96l%\x8cV\xbc\xbf\x98\x82G\xab\xd8\x07\xbf\xbf\xc3Z \xcd\xdc\x85\xd2?\x02>t\xbe\xa7\xf4\xc5\xbf\x84\xed\x84\x9b\x89\x95\xd4\xbf\x8dG\x1cDR\xf4\xdb?QK^\x18\xbe`\xc7?\rjz\x05\x88g\xd5\xbf\xeb\xe6\x1aJ\xcbZh?Z?\x03\xf43\xb3\xac\xbf\x12\xeb$a\xbd\x0c\xa7\xbf\xd2Wu)[\xe3\xcf?k\x1dm\xaa0\x0b\xa9\xbf\xbajY.\x07\xae\xb6\xbf>`M*\x9f\x9a\xb5?\xd5+`\x90\xb6\x0f\xbc\xbf4\x03\x80\x04\xaf\xde\xa1\xbf\xe5\x8b\xdc#\x15v\xb4?\x01\xae\xc1o\x07)\x94\xbf\x9e\xdd\xfa\x05v\x86\xb8?B\xb5\x14\xb4\x10\xd1\x9b?\xca\xb4\xc6\x9dw\xe1\xc4\xbf\x14Bn\xc6\xaeG\x95?Z\xca\xb2^\x0f7~?\xe1w\xcaB\xd3\xbd\xa0\xbfM\xda\x00v`m\xc1?\xa6\xab6\xf1st\xa2?>9\xe9\x1fe\xc0\xb7\xbf\xfc\x00\x0e-4\xa4\xa1\xbfA"\xef\xf5\xf4\xf1\xab\xbf\xf5\x07\x82C\x87\x14\x8a\xbf\x00\xc3\x0f\xdbKZ\xc0?\x18\xc5L\xf4\x1d\xc1\xb9?`\xde\x8c\xc0\xc7g\xb4\xbf\x94\x88S\xe8TS\xc7\xbfY\xc2\xf9\x93\x8dsh\xbf\x0b\xfe\xbbs\x7f\xf9\xc4?O\xdd\x89a!\xde\xac?#X\xa1\xfd} \xbc\xbf\x05\xb7\x10\xacID\x83\xbfA\xdd\x11[\x93\xfa\xc6?\x83l\x95V\xc7\xcb\xc1\xbf\x8b\x0b\xbe\xa6\x16\x86\xd3\xbf\xb1\'\x98f\xa2\xa0\xc8?w\xbfg\x182\xcd\xbe?\xb2V\xe3\xacp\x01l\xbfZ\x16\x93:\x01\xb9\xd9?\x93\xd2\xb9/r\x99\xc3\xbf\xbd\x84<\xd5G|\xe3\xbf&\xfa?C\xfd\xcd\xa6?\x85?\xe6\xc8,Y\xd0?\x89 
\x0c\x1bNT\xa3?\xb1J-\xa9\xad\xac\xbe\xbfd5\x86\x10\x85\xc4\xa3?\xa4\xab\x0e\x8d\xb0\xb1\xcc?\xe9\xf1\xf7\xa3m\xeef\xbf\xf2P\x94=\xeb\xa8\xb1\xbf\xff\x8e{\xb90\x01\xb4\xbfY\x8c\xf3\xf4\xbc\xce\xc8\xbf\xdf\xbc{W]"\xa0?\xc2\xa4\x14dq\x1e\xc8?\x10x\xad\xe1\xe3\xd2\xb9?:\xc3\xf6}\x82{\xbc?\xe6\xe1\x97V\x81S\xc6\xbfd\x04\x13{\x05\xff\xd3\xbf\x02g\x80\x00\x94\x9f\xb9?,\xd9\xba\xa2\x9a\x97\xd1?\x05^\x0e\xa9\xfc|\xb7?{PTp\xf9\x1b\x93?\x85I\xb88f\x9d\xcd\xbf\xca\x14\xf2\x984\xd9\xd2\xbf1\xe5P\xf6\x06z\xc8?\xf6\xb8l4\xd7\xe7\xcf?>\xa6\xc5\xbaB\x92\xb2\xbf\xac\x8e\x08\xb9p\x93\xb1?m\xa7\xbf\xdb\x98LU?\xefS.p\x1b)\xd8\xbf\x12lZ-\xc7\xcb\x9e\xbf\xdd\xe0\xba\xe2\xf4"\xd7?\xf1r\xadw8x\xb2?\xebxD\xd2\xcc\x93\xcb\xbf/Uv\xbe\xcf4n?\xf9A\x15\x0f\xc8)\xbb?\xd6^\x89\x01\x10\xd0\xbc\xbf\xd5 \x85\xf4\x9d\xe7\x94\xbf\x9d \xf2\xc7\xb2\x19\xb2?;\xa0E`\x08p\x91?\xb7#QQ\xc5.\xb2?a\xd4\x07s4&\xa1\xbf\'o\xf9g6\x1a\xb9\xbf[\xa64\x08|\xfa\xaf?\xc3\xb5\x85\xa7\xc7F\x9c\xbf\xcb\xb2\xa1\xbe\xa1\xfb\xc3\xbf8#\xc6J\x15w\xba?\xd5xrq\x82\xce\xcb?\x92T\x16\x8cL(\xb5\xbfC\xbaj\xc0\xc3\x87\xaf\xbf\x078\x12L\xc1U\x98?\'\xc9\xea\xa0N\xa9\xcb\xbf\x82\x04\xf98B\xcc\x86?>\x05B\xe1!\xf9\xdb?\xa6\xf4\x06\rb\x96\xa5?\x80\xdd\xdcY\x18^\xdb\xbfx\xeb\xbd\x90\x85{\xc2\xbf\xc7\xb3/}\x18]\xc3?\xde\x85\xf1=(\xc8\xc2?\xbe\x9b\x96\x82P^\xc0?\xd6\x05\xe9\xdah{\xa9\xbf\xecj\xc4\xbf\x06\x94\xd3\xbf\x1b"$do\xed\xab?\x85\xff\xf1\xaf\x1ce\xd8?Iv]"\x8c$\xbc\xbf\x17-\x8a\xd5#\xee\xd3\xbf\x9b\x14\x0f\xeav\xa9\xad?\xee\xd6\x1aZ\xe9\xd9\xbf?\xf5^\xb8\x83=J\x9e?(\xe5\xdf\xfb\xa0\xe2y?\xdf\x9a\xad\xbc\xe3\xc6\xb3\xbf\xa5\xf70\xbe\xe0\xe1\x89\xbf7\xdb\x86\x16\x1aL\xa9?\xb2\xe5\x9f\xd2\xf7W\xbf?{\xabp`\x8d\xa4$?\xfb\xa9;\x04\xd3\xc4\xd6\xbf\xf5\x9fDn\xb0F\x9f\xbfZ\xf2\xaa.T\xe3\xd8?A\xd9\x7f\x9fF\xed\xa8?qZ\x1f\x8a-\xd3\xb1\xbfCQ.\xec//\x87?\x80\x1eu{\xca\x1d\xd5\xbf\x1e6\xbc\xf9\x80\x17\xc2\xbfe\xee\xc6\xd8\xb8l\xdb?\x0b\x02\x965\x1b\xc1\xcb?2\xadn\x16\x7fU\xd0\xbf\xa6W\xa6]\x10\x99\xc4\xbf\x19\xf8\xe8\xc3\t\xd0\xb8?\xc0\x01\xfb\x86\x81W\xa6?\xf9\x96\x88znM\xa3\xbf\r|.b \xbd\xa3?WP\x12\n\xb8\xe8\x8e?D\xf3\x14\xb7\xc4\xf4\xad\xbf&\x8a\xf0ZC^\xa9\xbf\xd4\xfa%x\xd3-\xa9?8\xcf(\x8d\xf42\xbb?-9Pg\xd2\xfa\x8a?\xfdr\xbe\x8e\xea\xd0\xbb\xbf\x80\x1f\xaa)2\xab\x96\xbf\xd0A\xb8\x1b\xe5d\xb5?\x9dC\x8c\x96\xde\x83\xb2\xbf\x11\x81\x1eLh\x94\x95\xbf\x92?\x03o\x00h\xce?\xe7:\n\xfc\xff\x03\x99\xbf\xb4\xf1\x98\xb6\xb7\x08\xd5\xbf\xf4>u}\xb8\x1a\x85?\xde\x8c@\xa3\x89\xe2\xc3?\xc3IK(\x0e\xb9\x88?\xcc\xbf\t\xe3\x19\xfe\xc2\xbf\xc8a\xdbT\xfb\xfc{?<\xc5\x08\xea\xec\x10\xd1?C\xacv7#\xc4\xb8\xbf\xbdF\x0en\x89I\xbc\xbf\xa7\xc5\xf1{/J\xc3?Y}\xc2\xacc\xe8\xbb\xbf\x82\xb3}g\xbf[\xbf\xbf*Zj\xfdt\x1f\xcb?\x1a7\xd5 
\xe5l~?\x057\xdf#lK\xc0\xbf\xf1\xe1i\x08\xf2C\xae?F\xb5\xf3\x8b\xcfb\x82\xbfa\xc7M\xbc\x8bN\xab?\x07\xc2H\x1f\xcc"\xbf?!\xa0\xea\xd0:\xfb\xcf\xbf\xef\x88\x05\x91\xcco\xcf\xbf\xcc\xea\xeaX\xec/\xd4?\x0c.\x05X\x97j\xd4?i]`\xccD{\xd0\xbf\xc8\x17p@\xea\xc9\xd1\xbf\x06\x85\x7f\xe8\xe1O\xc3?\x83\xa9\n\xb3\xb9\xf3\xaf?\x16\x1f\xae\x15\x81\xba\xa0\xbf\x97B\xfd\xf3/\xf3\xcc?\xa9~\xf5\xf3\xcd\xf0\x9b?\x19"\xfc\x8e\xc8F\xd7\xbf\x9d\xca\xb5\x86<\xe4\xaf\xbf\xf4\x10\xceQFa\xcf?\xdb6\xa3\x9a\xcf\x01\xb3?\xce\x99\xf7S\xf8\x15\x81\xbf\x04(\xe4d\x17L\xa2\xbf\xfaT*\x0648\xab\xbf\x8f\xbc\xf31l\x04\xa9\xbf\x0clrY\x05\xf6\x90\xbf\x03=\x82\xa4\xe0\x8f\xc1?\xd6#\xbdV\xb7\xc4\xbb?\xca\x19\x1ca\x98\xf8\xc7\xbf\xd1\xcb+\'B\x9e\xc7\xbfz\xb4F\xbbx\x80\xc1?\x84\x10\x84*\xbfQ\xbd?\x9e\x16|\xeb\x0c\x11\xa6\xbf\x13\xcf\xc46\x7f7\xb4?\xa4\x0b)\x15$]\x83?b\xb68L%&\xd0\xbf\x03\xea\x0f\xce\x89d\xa5\xbf\xe8\xc8\x0b\r|\xc3\xd4?\xf9\xe8\xbd^\xae\xec\xc0?i\xc7\x1b\x8a\xf7\xaa\xd7\xbf\xe7\xdf\xcc}\xc0t\xc4\xbf\xb3T\xe2\xc8;{\xd8?\x80N\xa2h\x82\xb9\xbe?yeQ\xca\x81\xe5\xd1\xbf\x82YK\x1e$M\xbd\xbf\xab\xac\x05D-\x16\xb7?e\x0b\x81_\nB\xb9?\xe9\xb5\x9a\x93Pd\xc0?e/*%Q\x8c\xa1\xbf*\xc1\x0c\xd4-%\xcb\xbf\x9a<\xfb\xd4w_\x86\xbf\x9c\xb2\x8f\xa7k\xae\xac?\xe3Z%q\x1az\x91\xbf#\x10\xd0\xca\x1cK\xcc?z\xa8\xa39\xab\x8b\xac?t\xa8c\x0e\xeeb\xd4\xbf\xfd\x05\x9a\xd2A\xa3\xa0\xbf\xe9+I\x92\xc0\x8a\xc0?\x90\xf5\xe4(\x9e?\xc1\xbfY\x0e\xed\x12\xc4\xbc\xc1?\x92\'9\xbd\x04\xaa\xd6?\xc5*\xc3\xda1\x94\xcc\xbf\xce\xbe\xb1n\xf8\xd7\xd9\xbf\xdd\xe9\xa6\xde\x9bMx?Pcx\xd8\x85\xef\xd3?\x1dT\x13~\x82f\xcd?-]R\xee})\xc5\xbfu\xa4\xaa@\xb6\x9b\xd2\xbf\x05\x06\xbe\xcf\x07\xf9\xb0?\xfbO_\x9fS\xd0\xcb?3\xe6\x15~\x86*\xa2\xbfNC:"Z\x10\xc0\xbf\xb7\xbbyS\xf8\x0e\x8a\xbf\\\xe9\xb3\x1fs\xa6\xb8?\xfe\xd9c\xc5\xaa\xa5\xb0?6\xe9h\xc5\x0e\x87\xb1\xbfO&&\x8b\x07\t\xb7\xbf-\x97X\xe2\xf4\tv?\x9ay0\xcc\xaeu\xba?\x893\x18\xd6\xb3*\xb5?\n\xe6\x0b\x83s\x81\xbb\xbf"\xb4E\x14\xfb"\xc6\xbf2\x82\xa5\xacC\xf8\xb6?\xb6Cn\xcb?\xdd\xcc?\x92\xff\x17\xd5\xc0H\xa7\xbff7H\xb3\x05\xba\xcc\xbfl*\xd4l\x83\xf3f?\xaf\x1fh8\xf5\xba\xcc?\xff\xed\x84\xdb\xdb\x92q\xbfi\xab\xae=\x89\xb3\xc9\xbf1\xd9\x17\x90\x9emz?\xa2D?b\xd5\x91\xb4?\xde0\xbbqGf\xa1?\x82\xe3\x88D:\xe7w?~\x07\xbd^6\xdf\xba\xbf\x8e\xab\xcb\xf0\x9a\xa4\x93\xbft\xbc\xd5h\x1fc\xc9?\xe8o(\xcd\xf8\xd8\x9c?\xb7\xb3\x0b]\xd6-\xd5\xbfDOm&\x05$\xa1\xbf6;\x902\xeb6\xda?=+W\x9c\xbbu\xa4?\xba\x8d\xe6x\xceE\xd4\xbf\x8d#\\\xe1=\xa4\xad\xbf\xa6\x04\xe4\x16\xa2\x0b\xc4?7\x1f\x1a>c#\xb0?\xf6\xb8rx-\x04\xb9\xbf,\x04\xdd"&\x8a\xa9\xbf\xbb\xb4\xa7\x842.\xba?\xad+\xee\xa8\x8a\x03\xae?Sm\xb9y\x81\x18\xb5\xbf\xda\x81\x18hZ\x8b\xb6\xbf\x92F 
\x1f\xcd\xe0\xaa?\xfc\xd8\xa0/@6\xc2?\xf0\x8e\xb7=\xd0\xa3\xa0\xbf`\xec\xf2$\x98\xe2\xcb\xbf\xbb\xb5V\xd1\xb4\xa3\x87?\xbe\xd1h\xed\xa2\x0f\xd3?\xe9u=\xf8\x00q_?\xa4\x85\xa6p\xa0Z\xd6\xbf\xe0\x9f\xdf\xedq4_?"\x10\x84?\xeb\xd5\xd6?\xfaw\xcc\xef\x0b/\x83?\x91<Ek\xc3\x8d\xd1\xbf\xf9\xf3\xc5\x97\xcd\x1b\x8a\xbf\x84\xdb\xee\'\x04\xed\xb8?M>\xbd\x91\xbfI\x92\xbf\x17\x84\x95\x14\x84J\xbc?\xd7\xae\xd4\x95\xff"\xaf?\xe5\x01\xc1z\xb8V\xcf\xbfZ\xaa\x17L\x9c\xff\xa4\xbf\xfb-\\>n@\xc4?\xa6\xa3\xe6\x82\xd9\xcb\xab\xbfI\x16\xc1\xfd%r\xa5?\x7f\xf0\xab#\x99\x0f\xc9?p\x99y\x8e\xf3\x07\xc6\xbf5\x15w"\x0e\x17\xd1\xbfw6WuXC\xc8?\xd4\xc4\xff\xc6B\x0e\xc9?$\xbf\xe8)\x90q\xc3\xbf"\x1eE\xff7m\xc1\xbf\x97\xf5\x93\x9c["\xbb?x\xb3\xe4\x8c@\x9e\xa2?4\x9a\xc3\xc7\xce\xb7\xb3\xbf\xac\xf5\x90\xb2f\x8d\xba?9\x9bR\xbc\x87)\xc1?\x1e\x1a\xf5\xab\xe7\x8d\xc9\xbf~]\xf2\x07\x12"\xd1\xbf\x12\x05i\xcd?b\xcd?=]\x8clXf\xd9?\x99\xfe"*\xd5\xd4\xc0\xbf\xd5\xf9\xe6*\xf0\xdd\xd6\xbf\xb0\xb7\xbe\x93\xc0\x0c\xa2?A\xc84\x0f\xfd\xfc\xc1?%\xcb +/\xb9\x7f\xbf\xa0y\xcb\x02\xeb\xcd\xc1?\xbb\xb1(\x1cy\x06\x87?\xda\xb3Q*{\xd0\xd0\xbf*?"rm,\x8b\xbf\x18\x7fn<{\xea\xbd?\xd5\x9f\x16@\x93f\x90\xbf\xbb\xd6>\xf1|L\xc3?\xec\x1e\xd4F\x94\xdd\xb0?\xcc0\x8cQA\xf3\xdb\xbf\xfa\x96O\xb2Lu\xb1\xbfXB\xa2\x87)y\xe1?\xc02\xab/1%\xb1?\xa0\xfb\x12\x88\xb2\x88\xdf\xbf\x05L\xe1{\x84D\xbf\xbf\xcb\x8eS\xa1wM\xd7?\xff\xbd\xe1D\xf2\x8e\xba?\x9b\x8a\xe0\x80\xc4\xda\xc8\xbfZ\xd7M\xdb6x\xa9\xbf\x8bn ;\xe2\xf8\xb5?\xe5\'\x00\x84Iz\xa4?\xdc\xb8\x07\xed\xed\xc4\xa6\xbf$\x94x\x02\xad-\xa0\xbf\xc1C\x1f\rDb\x9b?\x1d2G\x89R\xa5}?\x0c\xca|\xa3c\xf4\x7f\xbf \x18\x1a\xb9\xa3\x90\x91?b\xaaWo<[~\xbf\xc3C\xda\xa3\x8a\x99\xaa\xbf\xcc\xcf5\xaed\xbfp?\x1c\xfc\xd2\x97\xea\xb4\xae?1bk0\x06\xd5\x97?\xb8\xf3\xc03\x08\x7f\xa3\xbf\xef\xae\x01\xe2\xe9W\xa3\xbfvaf\xf36\xab\x9c?\x8a-%\xe6\xb3\xa4\xaf?\xc0i!ho\xc6\x94\xbf\xd3)\x94\x8e\xa0\xa4\xb1\xbfC\x1d\x84\xaf|\x8d\x82?\xa5\x93\xb3\xc8\xd4\x00\x9c?'
|
434 |
-
p179
|
435 |
-
tp180
|
436 |
-
bsg79
|
437 |
-
g64
|
438 |
-
(g65
|
439 |
-
(I0
|
440 |
-
tp181
|
441 |
-
g67
|
442 |
-
tp182
|
443 |
-
Rp183
|
444 |
-
(I1
|
445 |
-
(I34
|
446 |
-
tp184
|
447 |
-
g74
|
448 |
-
I00
|
449 |
-
S'J\x94!\x9e\xa9\xc4P\xc0\xc87\xa4W\xe2LS\xc0\xa8\xbb\xd0S\xb5nG\xc0\xfd;\xcd\nS-S\xc0\x8d)\x83)\xdc\xb8:\xc0\xb1_\xb6\xb4\x81\x8fR\xc00\x81\x8ci\x18\xa5\x1c\xc0x\xf2\xc5\xcc\xb4qQ\xc0\x89p\x95\xd0\xc0\xfa%@\xaf\xa9c|\x1e\xd8N\xc0R\x98!\xa8L\xdd:@\xce\rTB\xf4\xa9H\xc0pu\xc1\x16\n\x13D@\xcad\xd03H(A\xc0\xb2\x14\xc6\xbb9TI@\xe0\xf7T\xfd\xe0\xb31\xc0\'E\x0c\'\xa5\xadJ@\x88a9\xdd\xed\x0f\xee?e\x81#4\x89\xd8H@=\xe4\xb2\xbd\xd083@\x80\xbe\x81\xd2t\\C@\xb4\x1f\x00\\\xd8\xacA@\xe3\xca\x07\x8dR=9@\xe7?\xe0\x02S\x01I@k\xe0\xe7!\x0cc"@\xa0\xcbE\x15a\xf1N@\xf8\xb3\xf0\x85J\xa5"\xc0S\'\xf6\x01\xb6cQ@j_g~\xf7 =\xc0J\xf8<oDZR@\x05Ol\xe59\x98H\xc0\x12\xd0\x13\xff\x7f\xcaR@\xf2\xef\xa4\xef\xc3YQ\xc0\x87\xe0\x05\xe7\xfc\xdaR@'
|
450 |
-
p185
|
451 |
-
tp186
|
452 |
-
bsS'n_samples'
|
453 |
-
p187
|
454 |
-
I3148
|
455 |
-
sS'_n_active_components'
|
456 |
-
p188
|
457 |
-
I30
|
458 |
-
sbsbasS'reference_shape'
|
459 |
-
p189
|
460 |
-
g0
|
461 |
-
(g87
|
462 |
-
g2
|
463 |
-
Ntp190
|
464 |
-
Rp191
|
465 |
-
(dp192
|
466 |
-
g91
|
467 |
-
g64
|
468 |
-
(g65
|
469 |
-
(I0
|
470 |
-
tp193
|
471 |
-
g67
|
472 |
-
tp194
|
473 |
-
Rp195
|
474 |
-
(I1
|
475 |
-
(I17
|
476 |
-
I2
|
477 |
-
tp196
|
478 |
-
g74
|
479 |
-
I00
|
480 |
-
S'\xcf\x1c\xa2\xfci\xe0L@\xf3\x80\x82\xa8h\x0e8@\xd9AM\x15\x0c\x9aS@\xe1\xd6\xf7\xf5\t\xa98@4\x06\xdeIZ\xc1X@\xdf\xf9\xf4\xbch@;@\xc4\x89X\xbe\x89\xc2]@\xe3y\xb3\x9dB\xdc?@G\x97\x12\xff\xda0a@\xd5\xa7H\xd0\x89\x11D@_\xaa\xe1r\x9c3c@$\x9a\xb4\xbd\x02bJ@b\xf0\x8f\x07\xa8\xe0d@\xe6\xc4Y\x0e\x02\x07Q@l|v\x0b\xd42f@\x88\xd5\x88\xc8\xabDU@\x8d\xec\x15\x92-\x89f@z\xd6\x12\x832\x07Z@\xf5\x0eQ\xb0O\x14f@\xf6D\xdfXL\xb2^@\x95\x8d\xc5"\x04\xb3d@\x1a\x91\x8eR\x87fa@\xe0hL\xf6O\xffb@\xff1E\xbb;Dc@*\xdbn\x9d\xcd\xf6`@:\x1a\xb6-=\xc7d@\x15<-Q\x8a7]@)\xd3b4|\xc2e@m\x0f`\xc2\xd8\'X@ud\x1e\x86oBf@\x98\x9a1\x15\xf0\x06S@\xb1\x97\xe5s\xc6~f@\x06;A\x05\x99\xbbK@\xaa\x13\xc2\xfb_\x8af@'
|
481 |
-
p197
|
482 |
-
tp198
|
483 |
-
bsg98
|
484 |
-
Nsbsg48
|
485 |
-
(lp199
|
486 |
-
g49
|
487 |
-
asg51
|
488 |
-
g34
|
489 |
-
sg46
|
490 |
Nsb.
|
|
|
[New file (added), lines 1–433; the diff view cuts off mid-buffer. In the visible overlap the re-added pickle is byte-for-byte identical to the removed one, so the change appears to be storage or line-ending normalization rather than a retrained model. The header is recoverable: it reconstructs a menpofit.clm.base.CLM instance with

  opt = {'ablation': (True, True), 'verbose': False, 'rho2': 20,
         'sigRate': 0.25, 'ratio2': 0.08,
         'imgDir': '/Users/arik/Desktop/artistic_faces/applications/AF_sample',
         'dataset': 'demo', 'ratio1': 0.12, 'smooth': True, 'pdm_rho': 20,
         'sigOffset': 25, 'kernel_covariance': 10, 'numIter': 5}

  _shape_model_cls = [menpofit.modelinstance.OrthoPDM]
  max_shape_components = [None], scales = [1], diagonal = 200
  holistic_features = [menpo.feature.features.no_op], patch_shape = [(8, 8)]
  expert_ensemble_cls = [menpofit.clm.expert.ensemble.FcnFilterExpertEnsemble]
    (sample_offsets=None, cosine_mask=True, context_shape=(8, 8),
     response_covariance=3, patch_normalisation=no_op, _icf=None)
  shape_models = [an OrthoPDM wrapping a _SimilarityModel with (4, 34)
    components over the (34,)-vector mean shape, plus a PCAModel over the
    (17, 2) landmark shapes]

followed by the same binary float64 buffers summarized above.]
\x0c\x1bNT\xa3?\xb1J-\xa9\xad\xac\xbe\xbfd5\x86\x10\x85\xc4\xa3?\xa4\xab\x0e\x8d\xb0\xb1\xcc?\xe9\xf1\xf7\xa3m\xeef\xbf\xf2P\x94=\xeb\xa8\xb1\xbf\xff\x8e{\xb90\x01\xb4\xbfY\x8c\xf3\xf4\xbc\xce\xc8\xbf\xdf\xbc{W]"\xa0?\xc2\xa4\x14dq\x1e\xc8?\x10x\xad\xe1\xe3\xd2\xb9?:\xc3\xf6}\x82{\xbc?\xe6\xe1\x97V\x81S\xc6\xbfd\x04\x13{\x05\xff\xd3\xbf\x02g\x80\x00\x94\x9f\xb9?,\xd9\xba\xa2\x9a\x97\xd1?\x05^\x0e\xa9\xfc|\xb7?{PTp\xf9\x1b\x93?\x85I\xb88f\x9d\xcd\xbf\xca\x14\xf2\x984\xd9\xd2\xbf1\xe5P\xf6\x06z\xc8?\xf6\xb8l4\xd7\xe7\xcf?>\xa6\xc5\xbaB\x92\xb2\xbf\xac\x8e\x08\xb9p\x93\xb1?m\xa7\xbf\xdb\x98LU?\xefS.p\x1b)\xd8\xbf\x12lZ-\xc7\xcb\x9e\xbf\xdd\xe0\xba\xe2\xf4"\xd7?\xf1r\xadw8x\xb2?\xebxD\xd2\xcc\x93\xcb\xbf/Uv\xbe\xcf4n?\xf9A\x15\x0f\xc8)\xbb?\xd6^\x89\x01\x10\xd0\xbc\xbf\xd5 \x85\xf4\x9d\xe7\x94\xbf\x9d \xf2\xc7\xb2\x19\xb2?;\xa0E`\x08p\x91?\xb7#QQ\xc5.\xb2?a\xd4\x07s4&\xa1\xbf\'o\xf9g6\x1a\xb9\xbf[\xa64\x08|\xfa\xaf?\xc3\xb5\x85\xa7\xc7F\x9c\xbf\xcb\xb2\xa1\xbe\xa1\xfb\xc3\xbf8#\xc6J\x15w\xba?\xd5xrq\x82\xce\xcb?\x92T\x16\x8cL(\xb5\xbfC\xbaj\xc0\xc3\x87\xaf\xbf\x078\x12L\xc1U\x98?\'\xc9\xea\xa0N\xa9\xcb\xbf\x82\x04\xf98B\xcc\x86?>\x05B\xe1!\xf9\xdb?\xa6\xf4\x06\rb\x96\xa5?\x80\xdd\xdcY\x18^\xdb\xbfx\xeb\xbd\x90\x85{\xc2\xbf\xc7\xb3/}\x18]\xc3?\xde\x85\xf1=(\xc8\xc2?\xbe\x9b\x96\x82P^\xc0?\xd6\x05\xe9\xdah{\xa9\xbf\xecj\xc4\xbf\x06\x94\xd3\xbf\x1b"$do\xed\xab?\x85\xff\xf1\xaf\x1ce\xd8?Iv]"\x8c$\xbc\xbf\x17-\x8a\xd5#\xee\xd3\xbf\x9b\x14\x0f\xeav\xa9\xad?\xee\xd6\x1aZ\xe9\xd9\xbf?\xf5^\xb8\x83=J\x9e?(\xe5\xdf\xfb\xa0\xe2y?\xdf\x9a\xad\xbc\xe3\xc6\xb3\xbf\xa5\xf70\xbe\xe0\xe1\x89\xbf7\xdb\x86\x16\x1aL\xa9?\xb2\xe5\x9f\xd2\xf7W\xbf?{\xabp`\x8d\xa4$?\xfb\xa9;\x04\xd3\xc4\xd6\xbf\xf5\x9fDn\xb0F\x9f\xbfZ\xf2\xaa.T\xe3\xd8?A\xd9\x7f\x9fF\xed\xa8?qZ\x1f\x8a-\xd3\xb1\xbfCQ.\xec//\x87?\x80\x1eu{\xca\x1d\xd5\xbf\x1e6\xbc\xf9\x80\x17\xc2\xbfe\xee\xc6\xd8\xb8l\xdb?\x0b\x02\x965\x1b\xc1\xcb?2\xadn\x16\x7fU\xd0\xbf\xa6W\xa6]\x10\x99\xc4\xbf\x19\xf8\xe8\xc3\t\xd0\xb8?\xc0\x01\xfb\x86\x81W\xa6?\xf9\x96\x88znM\xa3\xbf\r|.b \xbd\xa3?WP\x12\n\xb8\xe8\x8e?D\xf3\x14\xb7\xc4\xf4\xad\xbf&\x8a\xf0ZC^\xa9\xbf\xd4\xfa%x\xd3-\xa9?8\xcf(\x8d\xf42\xbb?-9Pg\xd2\xfa\x8a?\xfdr\xbe\x8e\xea\xd0\xbb\xbf\x80\x1f\xaa)2\xab\x96\xbf\xd0A\xb8\x1b\xe5d\xb5?\x9dC\x8c\x96\xde\x83\xb2\xbf\x11\x81\x1eLh\x94\x95\xbf\x92?\x03o\x00h\xce?\xe7:\n\xfc\xff\x03\x99\xbf\xb4\xf1\x98\xb6\xb7\x08\xd5\xbf\xf4>u}\xb8\x1a\x85?\xde\x8c@\xa3\x89\xe2\xc3?\xc3IK(\x0e\xb9\x88?\xcc\xbf\t\xe3\x19\xfe\xc2\xbf\xc8a\xdbT\xfb\xfc{?<\xc5\x08\xea\xec\x10\xd1?C\xacv7#\xc4\xb8\xbf\xbdF\x0en\x89I\xbc\xbf\xa7\xc5\xf1{/J\xc3?Y}\xc2\xacc\xe8\xbb\xbf\x82\xb3}g\xbf[\xbf\xbf*Zj\xfdt\x1f\xcb?\x1a7\xd5 
\xe5l~?\x057\xdf#lK\xc0\xbf\xf1\xe1i\x08\xf2C\xae?F\xb5\xf3\x8b\xcfb\x82\xbfa\xc7M\xbc\x8bN\xab?\x07\xc2H\x1f\xcc"\xbf?!\xa0\xea\xd0:\xfb\xcf\xbf\xef\x88\x05\x91\xcco\xcf\xbf\xcc\xea\xeaX\xec/\xd4?\x0c.\x05X\x97j\xd4?i]`\xccD{\xd0\xbf\xc8\x17p@\xea\xc9\xd1\xbf\x06\x85\x7f\xe8\xe1O\xc3?\x83\xa9\n\xb3\xb9\xf3\xaf?\x16\x1f\xae\x15\x81\xba\xa0\xbf\x97B\xfd\xf3/\xf3\xcc?\xa9~\xf5\xf3\xcd\xf0\x9b?\x19"\xfc\x8e\xc8F\xd7\xbf\x9d\xca\xb5\x86<\xe4\xaf\xbf\xf4\x10\xceQFa\xcf?\xdb6\xa3\x9a\xcf\x01\xb3?\xce\x99\xf7S\xf8\x15\x81\xbf\x04(\xe4d\x17L\xa2\xbf\xfaT*\x0648\xab\xbf\x8f\xbc\xf31l\x04\xa9\xbf\x0clrY\x05\xf6\x90\xbf\x03=\x82\xa4\xe0\x8f\xc1?\xd6#\xbdV\xb7\xc4\xbb?\xca\x19\x1ca\x98\xf8\xc7\xbf\xd1\xcb+\'B\x9e\xc7\xbfz\xb4F\xbbx\x80\xc1?\x84\x10\x84*\xbfQ\xbd?\x9e\x16|\xeb\x0c\x11\xa6\xbf\x13\xcf\xc46\x7f7\xb4?\xa4\x0b)\x15$]\x83?b\xb68L%&\xd0\xbf\x03\xea\x0f\xce\x89d\xa5\xbf\xe8\xc8\x0b\r|\xc3\xd4?\xf9\xe8\xbd^\xae\xec\xc0?i\xc7\x1b\x8a\xf7\xaa\xd7\xbf\xe7\xdf\xcc}\xc0t\xc4\xbf\xb3T\xe2\xc8;{\xd8?\x80N\xa2h\x82\xb9\xbe?yeQ\xca\x81\xe5\xd1\xbf\x82YK\x1e$M\xbd\xbf\xab\xac\x05D-\x16\xb7?e\x0b\x81_\nB\xb9?\xe9\xb5\x9a\x93Pd\xc0?e/*%Q\x8c\xa1\xbf*\xc1\x0c\xd4-%\xcb\xbf\x9a<\xfb\xd4w_\x86\xbf\x9c\xb2\x8f\xa7k\xae\xac?\xe3Z%q\x1az\x91\xbf#\x10\xd0\xca\x1cK\xcc?z\xa8\xa39\xab\x8b\xac?t\xa8c\x0e\xeeb\xd4\xbf\xfd\x05\x9a\xd2A\xa3\xa0\xbf\xe9+I\x92\xc0\x8a\xc0?\x90\xf5\xe4(\x9e?\xc1\xbfY\x0e\xed\x12\xc4\xbc\xc1?\x92\'9\xbd\x04\xaa\xd6?\xc5*\xc3\xda1\x94\xcc\xbf\xce\xbe\xb1n\xf8\xd7\xd9\xbf\xdd\xe9\xa6\xde\x9bMx?Pcx\xd8\x85\xef\xd3?\x1dT\x13~\x82f\xcd?-]R\xee})\xc5\xbfu\xa4\xaa@\xb6\x9b\xd2\xbf\x05\x06\xbe\xcf\x07\xf9\xb0?\xfbO_\x9fS\xd0\xcb?3\xe6\x15~\x86*\xa2\xbfNC:"Z\x10\xc0\xbf\xb7\xbbyS\xf8\x0e\x8a\xbf\\\xe9\xb3\x1fs\xa6\xb8?\xfe\xd9c\xc5\xaa\xa5\xb0?6\xe9h\xc5\x0e\x87\xb1\xbfO&&\x8b\x07\t\xb7\xbf-\x97X\xe2\xf4\tv?\x9ay0\xcc\xaeu\xba?\x893\x18\xd6\xb3*\xb5?\n\xe6\x0b\x83s\x81\xbb\xbf"\xb4E\x14\xfb"\xc6\xbf2\x82\xa5\xacC\xf8\xb6?\xb6Cn\xcb?\xdd\xcc?\x92\xff\x17\xd5\xc0H\xa7\xbff7H\xb3\x05\xba\xcc\xbfl*\xd4l\x83\xf3f?\xaf\x1fh8\xf5\xba\xcc?\xff\xed\x84\xdb\xdb\x92q\xbfi\xab\xae=\x89\xb3\xc9\xbf1\xd9\x17\x90\x9emz?\xa2D?b\xd5\x91\xb4?\xde0\xbbqGf\xa1?\x82\xe3\x88D:\xe7w?~\x07\xbd^6\xdf\xba\xbf\x8e\xab\xcb\xf0\x9a\xa4\x93\xbft\xbc\xd5h\x1fc\xc9?\xe8o(\xcd\xf8\xd8\x9c?\xb7\xb3\x0b]\xd6-\xd5\xbfDOm&\x05$\xa1\xbf6;\x902\xeb6\xda?=+W\x9c\xbbu\xa4?\xba\x8d\xe6x\xceE\xd4\xbf\x8d#\\\xe1=\xa4\xad\xbf\xa6\x04\xe4\x16\xa2\x0b\xc4?7\x1f\x1a>c#\xb0?\xf6\xb8rx-\x04\xb9\xbf,\x04\xdd"&\x8a\xa9\xbf\xbb\xb4\xa7\x842.\xba?\xad+\xee\xa8\x8a\x03\xae?Sm\xb9y\x81\x18\xb5\xbf\xda\x81\x18hZ\x8b\xb6\xbf\x92F 
\x1f\xcd\xe0\xaa?\xfc\xd8\xa0/@6\xc2?\xf0\x8e\xb7=\xd0\xa3\xa0\xbf`\xec\xf2$\x98\xe2\xcb\xbf\xbb\xb5V\xd1\xb4\xa3\x87?\xbe\xd1h\xed\xa2\x0f\xd3?\xe9u=\xf8\x00q_?\xa4\x85\xa6p\xa0Z\xd6\xbf\xe0\x9f\xdf\xedq4_?"\x10\x84?\xeb\xd5\xd6?\xfaw\xcc\xef\x0b/\x83?\x91<Ek\xc3\x8d\xd1\xbf\xf9\xf3\xc5\x97\xcd\x1b\x8a\xbf\x84\xdb\xee\'\x04\xed\xb8?M>\xbd\x91\xbfI\x92\xbf\x17\x84\x95\x14\x84J\xbc?\xd7\xae\xd4\x95\xff"\xaf?\xe5\x01\xc1z\xb8V\xcf\xbfZ\xaa\x17L\x9c\xff\xa4\xbf\xfb-\\>n@\xc4?\xa6\xa3\xe6\x82\xd9\xcb\xab\xbfI\x16\xc1\xfd%r\xa5?\x7f\xf0\xab#\x99\x0f\xc9?p\x99y\x8e\xf3\x07\xc6\xbf5\x15w"\x0e\x17\xd1\xbfw6WuXC\xc8?\xd4\xc4\xff\xc6B\x0e\xc9?$\xbf\xe8)\x90q\xc3\xbf"\x1eE\xff7m\xc1\xbf\x97\xf5\x93\x9c["\xbb?x\xb3\xe4\x8c@\x9e\xa2?4\x9a\xc3\xc7\xce\xb7\xb3\xbf\xac\xf5\x90\xb2f\x8d\xba?9\x9bR\xbc\x87)\xc1?\x1e\x1a\xf5\xab\xe7\x8d\xc9\xbf~]\xf2\x07\x12"\xd1\xbf\x12\x05i\xcd?b\xcd?=]\x8clXf\xd9?\x99\xfe"*\xd5\xd4\xc0\xbf\xd5\xf9\xe6*\xf0\xdd\xd6\xbf\xb0\xb7\xbe\x93\xc0\x0c\xa2?A\xc84\x0f\xfd\xfc\xc1?%\xcb +/\xb9\x7f\xbf\xa0y\xcb\x02\xeb\xcd\xc1?\xbb\xb1(\x1cy\x06\x87?\xda\xb3Q*{\xd0\xd0\xbf*?"rm,\x8b\xbf\x18\x7fn<{\xea\xbd?\xd5\x9f\x16@\x93f\x90\xbf\xbb\xd6>\xf1|L\xc3?\xec\x1e\xd4F\x94\xdd\xb0?\xcc0\x8cQA\xf3\xdb\xbf\xfa\x96O\xb2Lu\xb1\xbfXB\xa2\x87)y\xe1?\xc02\xab/1%\xb1?\xa0\xfb\x12\x88\xb2\x88\xdf\xbf\x05L\xe1{\x84D\xbf\xbf\xcb\x8eS\xa1wM\xd7?\xff\xbd\xe1D\xf2\x8e\xba?\x9b\x8a\xe0\x80\xc4\xda\xc8\xbfZ\xd7M\xdb6x\xa9\xbf\x8bn ;\xe2\xf8\xb5?\xe5\'\x00\x84Iz\xa4?\xdc\xb8\x07\xed\xed\xc4\xa6\xbf$\x94x\x02\xad-\xa0\xbf\xc1C\x1f\rDb\x9b?\x1d2G\x89R\xa5}?\x0c\xca|\xa3c\xf4\x7f\xbf \x18\x1a\xb9\xa3\x90\x91?b\xaaWo<[~\xbf\xc3C\xda\xa3\x8a\x99\xaa\xbf\xcc\xcf5\xaed\xbfp?\x1c\xfc\xd2\x97\xea\xb4\xae?1bk0\x06\xd5\x97?\xb8\xf3\xc03\x08\x7f\xa3\xbf\xef\xae\x01\xe2\xe9W\xa3\xbfvaf\xf36\xab\x9c?\x8a-%\xe6\xb3\xa4\xaf?\xc0i!ho\xc6\x94\xbf\xd3)\x94\x8e\xa0\xa4\xb1\xbfC\x1d\x84\xaf|\x8d\x82?\xa5\x93\xb3\xc8\xd4\x00\x9c?'
|
434 |
+
p179
|
435 |
+
tp180
|
436 |
+
bsg79
|
437 |
+
g64
|
438 |
+
(g65
|
439 |
+
(I0
|
440 |
+
tp181
|
441 |
+
g67
|
442 |
+
tp182
|
443 |
+
Rp183
|
444 |
+
(I1
|
445 |
+
(I34
|
446 |
+
tp184
|
447 |
+
g74
|
448 |
+
I00
|
449 |
+
S'J\x94!\x9e\xa9\xc4P\xc0\xc87\xa4W\xe2LS\xc0\xa8\xbb\xd0S\xb5nG\xc0\xfd;\xcd\nS-S\xc0\x8d)\x83)\xdc\xb8:\xc0\xb1_\xb6\xb4\x81\x8fR\xc00\x81\x8ci\x18\xa5\x1c\xc0x\xf2\xc5\xcc\xb4qQ\xc0\x89p\x95\xd0\xc0\xfa%@\xaf\xa9c|\x1e\xd8N\xc0R\x98!\xa8L\xdd:@\xce\rTB\xf4\xa9H\xc0pu\xc1\x16\n\x13D@\xcad\xd03H(A\xc0\xb2\x14\xc6\xbb9TI@\xe0\xf7T\xfd\xe0\xb31\xc0\'E\x0c\'\xa5\xadJ@\x88a9\xdd\xed\x0f\xee?e\x81#4\x89\xd8H@=\xe4\xb2\xbd\xd083@\x80\xbe\x81\xd2t\\C@\xb4\x1f\x00\\\xd8\xacA@\xe3\xca\x07\x8dR=9@\xe7?\xe0\x02S\x01I@k\xe0\xe7!\x0cc"@\xa0\xcbE\x15a\xf1N@\xf8\xb3\xf0\x85J\xa5"\xc0S\'\xf6\x01\xb6cQ@j_g~\xf7 =\xc0J\xf8<oDZR@\x05Ol\xe59\x98H\xc0\x12\xd0\x13\xff\x7f\xcaR@\xf2\xef\xa4\xef\xc3YQ\xc0\x87\xe0\x05\xe7\xfc\xdaR@'
|
450 |
+
p185
|
451 |
+
tp186
|
452 |
+
bsS'n_samples'
|
453 |
+
p187
|
454 |
+
I3148
|
455 |
+
sS'_n_active_components'
|
456 |
+
p188
|
457 |
+
I30
|
458 |
+
sbsbasS'reference_shape'
|
459 |
+
p189
|
460 |
+
g0
|
461 |
+
(g87
|
462 |
+
g2
|
463 |
+
Ntp190
|
464 |
+
Rp191
|
465 |
+
(dp192
|
466 |
+
g91
|
467 |
+
g64
|
468 |
+
(g65
|
469 |
+
(I0
|
470 |
+
tp193
|
471 |
+
g67
|
472 |
+
tp194
|
473 |
+
Rp195
|
474 |
+
(I1
|
475 |
+
(I17
|
476 |
+
I2
|
477 |
+
tp196
|
478 |
+
g74
|
479 |
+
I00
|
480 |
+
S'\xcf\x1c\xa2\xfci\xe0L@\xf3\x80\x82\xa8h\x0e8@\xd9AM\x15\x0c\x9aS@\xe1\xd6\xf7\xf5\t\xa98@4\x06\xdeIZ\xc1X@\xdf\xf9\xf4\xbch@;@\xc4\x89X\xbe\x89\xc2]@\xe3y\xb3\x9dB\xdc?@G\x97\x12\xff\xda0a@\xd5\xa7H\xd0\x89\x11D@_\xaa\xe1r\x9c3c@$\x9a\xb4\xbd\x02bJ@b\xf0\x8f\x07\xa8\xe0d@\xe6\xc4Y\x0e\x02\x07Q@l|v\x0b\xd42f@\x88\xd5\x88\xc8\xabDU@\x8d\xec\x15\x92-\x89f@z\xd6\x12\x832\x07Z@\xf5\x0eQ\xb0O\x14f@\xf6D\xdfXL\xb2^@\x95\x8d\xc5"\x04\xb3d@\x1a\x91\x8eR\x87fa@\xe0hL\xf6O\xffb@\xff1E\xbb;Dc@*\xdbn\x9d\xcd\xf6`@:\x1a\xb6-=\xc7d@\x15<-Q\x8a7]@)\xd3b4|\xc2e@m\x0f`\xc2\xd8\'X@ud\x1e\x86oBf@\x98\x9a1\x15\xf0\x06S@\xb1\x97\xe5s\xc6~f@\x06;A\x05\x99\xbbK@\xaa\x13\xc2\xfb_\x8af@'
|
481 |
+
p197
|
482 |
+
tp198
|
483 |
+
bsg98
|
484 |
+
Nsbsg48
|
485 |
+
(lp199
|
486 |
+
g49
|
487 |
+
asg51
|
488 |
+
g34
|
489 |
+
sg46
|
490 |
Nsb.
|
MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_l_brow
CHANGED
@@ -1,490 +1,490 @@
[Binary diff body collapsed: basic_l_brow is a Python 2 pickle (protocol 0) of a menpofit.clm.base.CLM fitter. Recoverable structure from the dump: an 'opt' settings dict (rho2=20, sigRate=0.25, ratio1=0.12, ratio2=0.08, pdm_rho=20, sigOffset=25, kernel_covariance=10, numIter=5, dataset='demo', imgDir='/Users/arik/Desktop/artistic_faces/applications/AF_sample'); an OrthoPDM shape model over a 5x2 left-brow reference shape; scales=[1], diagonal=200, no_op holistic features, (8, 8) patches with an FcnFilterExpertEnsemble; and a PCAModel trained on n_samples=3148 with _n_active_components=6. The "-" (old) and "+" (new) hunks are line-for-line identical, so the recorded change leaves the pickled payload itself unchanged.]
MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_l_eye
CHANGED
@@ -1,486 +1,486 @@
|
|
1 |
-
ccopy_reg
|
2 |
-
_reconstructor
|
3 |
-
p0
|
4 |
-
(cmenpofit.clm.base
|
5 |
-
CLM
|
6 |
-
p1
|
7 |
-
c__builtin__
|
8 |
-
object
|
9 |
-
p2
|
10 |
-
Ntp3
|
11 |
-
Rp4
|
12 |
-
(dp5
|
13 |
-
S'opt'
|
14 |
-
p6
|
15 |
-
(dp7
|
16 |
-
S'ablation'
|
17 |
-
p8
|
18 |
-
(I01
|
19 |
-
I01
|
20 |
-
tp9
|
21 |
-
sS'verbose'
|
22 |
-
p10
|
23 |
-
I00
|
24 |
-
sS'rho2'
|
25 |
-
p11
|
26 |
-
I20
|
27 |
-
sS'sigRate'
|
28 |
-
p12
|
29 |
-
F0.25
|
30 |
-
sS'ratio2'
|
31 |
-
p13
|
32 |
-
F0.08
|
33 |
-
sS'smooth'
|
34 |
-
p14
|
35 |
-
I01
|
36 |
-
sS'dataset'
|
37 |
-
p15
|
38 |
-
S'demo'
|
39 |
-
p16
|
40 |
-
sS'ratio1'
|
41 |
-
p17
|
42 |
-
F0.12
|
43 |
-
sS'pdm_rho'
|
44 |
-
p18
|
45 |
-
I20
|
46 |
-
sS'sigOffset'
|
47 |
-
p19
|
48 |
-
I25
|
49 |
-
sS'kernel_covariance'
|
50 |
-
p20
|
51 |
-
I10
|
52 |
-
sS'numIter'
|
53 |
-
p21
|
54 |
-
I5
|
55 |
-
ssS'_shape_model_cls'
|
56 |
-
p22
|
57 |
-
(lp23
|
58 |
-
cmenpofit.modelinstance
|
59 |
-
OrthoPDM
|
60 |
-
p24
|
61 |
-
asS'max_shape_components'
|
62 |
-
p25
|
63 |
-
(lp26
|
64 |
-
NasS'scales'
|
65 |
-
p27
|
66 |
-
(lp28
|
67 |
-
I1
|
68 |
-
asS'diagonal'
|
69 |
-
p29
|
70 |
-
I200
|
71 |
-
sS'holistic_features'
|
72 |
-
p30
|
73 |
-
(lp31
|
74 |
-
cmenpo.feature.features
|
75 |
-
no_op
|
76 |
-
p32
|
77 |
-
asS'patch_shape'
|
78 |
-
p33
|
79 |
-
(lp34
|
80 |
-
(I8
|
81 |
-
I8
|
82 |
-
tp35
|
83 |
-
asS'expert_ensemble_cls'
|
84 |
-
p36
|
85 |
-
(lp37
|
86 |
-
cmenpofit.clm.expert.ensemble
|
87 |
-
FcnFilterExpertEnsemble
|
88 |
-
p38
|
89 |
-
asS'expert_ensembles'
|
90 |
-
p39
|
91 |
-
(lp40
|
92 |
-
g0
|
93 |
-
(g38
|
94 |
-
g2
|
95 |
-
Ntp41
|
96 |
-
Rp42
|
97 |
-
(dp43
|
98 |
-
S'sample_offsets'
|
99 |
-
p44
|
100 |
-
NsS'cosine_mask'
|
101 |
-
p45
|
102 |
-
I01
|
103 |
-
sS'context_shape'
|
104 |
-
p46
|
105 |
-
(I8
|
106 |
-
I8
|
107 |
-
tp47
|
108 |
-
sg33
|
109 |
-
g35
|
110 |
-
sS'response_covariance'
|
111 |
-
p48
|
112 |
-
I3
|
113 |
-
sS'patch_normalisation'
|
114 |
-
p49
|
115 |
-
g32
|
116 |
-
sS'_icf'
|
117 |
-
p50
|
118 |
-
Nsbasg45
|
119 |
-
I01
|
120 |
-
sS'shape_models'
|
121 |
-
p51
|
122 |
-
(lp52
|
123 |
-
g0
|
124 |
-
(g24
|
125 |
-
g2
|
126 |
-
Ntp53
|
127 |
-
Rp54
|
128 |
-
(dp55
|
129 |
-
S'similarity_model'
|
130 |
-
p56
|
131 |
-
g0
|
132 |
-
(cmenpofit.modelinstance
|
133 |
-
_SimilarityModel
|
134 |
-
p57
|
135 |
-
g2
|
136 |
-
Ntp58
|
137 |
-
Rp59
|
138 |
-
(dp60
|
139 |
-
S'_components'
|
140 |
-
p61
|
141 |
-
cnumpy.core.multiarray
|
142 |
-
_reconstruct
|
143 |
-
p62
|
144 |
-
(cnumpy
|
145 |
-
ndarray
|
146 |
-
p63
|
147 |
-
(I0
|
148 |
-
tp64
|
149 |
-
S'b'
|
150 |
-
p65
|
151 |
-
tp66
|
152 |
-
Rp67
|
153 |
-
(I1
|
154 |
-
(I4
|
155 |
-
I12
|
156 |
-
tp68
|
157 |
-
cnumpy
|
158 |
-
dtype
|
159 |
-
p69
|
160 |
-
(S'f8'
|
161 |
-
p70
|
162 |
-
I0
|
163 |
-
I1
|
164 |
-
tp71
|
165 |
-
Rp72
|
166 |
-
(I3
|
167 |
-
S'<'
|
168 |
-
p73
|
169 |
-
NNNI-1
|
170 |
-
I-1
|
171 |
-
I0
|
172 |
-
tp74
|
173 |
-
bI00
|
174 |
-
S'\x80Y7\x85z\xd5\x85\xbf\xfd\xa4E\x80\xdda\xe2?<~[\x891\x08\xca?\xfa\x8c\xe9Q\xa3t\xcb?$JN\x81\xe09\xc9?W\x9d\xc7u\xea\xd3\xcb\xbf\xf1HK\x07\xcb\x95\xb3\xbf\xca6\xe9\xff\x8f\xe4\xe2\xbf\x8e.\xf6\xb9Q\xb0\xc3\xbf\xc3/\xa8\x9e\xaaZ\xca\xbf\xae\x7f\xba$\x83i\xc4\xbfS\x86\x14\xc1\xbb\xc4\xcc?\xfd\xa4E\x80\xdda\xe2\xbf\x9bY7\x85z\xd5\x85\xbf\xfa\x8c\xe9Q\xa3t\xcb\xbf<~[\x891\x08\xca?X\x9d\xc7u\xea\xd3\xcb?%JN\x81\xe09\xc9?\xca6\xe9\xff\x8f\xe4\xe2?\xedHK\x07\xcb\x95\xb3\xbf\xc6/\xa8\x9e\xaaZ\xca?\x8e.\xf6\xb9Q\xb0\xc3\xbfQ\x86\x14\xc1\xbb\xc4\xcc\xbf\xaf\x7f\xba$\x83i\xc4\xbf\\,\x0cp\xbd \xda\xbf<\xcd\x19(\xdb\xa7\xa0<N,\x0cp\xbd \xda\xbfT %\x14yg\xc5<5,\x0cp\xbd \xda\xbfB\x17\x82D\xc0\xdd\xc2<!,\x0cp\xbd \xda\xbf\x12\x9c\x8b)\x8c\xed\xb5\xbc6,\x0cp\xbd \xda\xbfz\xd9\xc4?\xe1\xde\xc0\xbcL,\x0cp\xbd \xda\xbf\xde\xa6\xa6E\xf1\x96\xbc\xbc|\xa0\xb9\xa94^\xa7\xbc[,\x0cp\xbd \xda\xbf\x19\x95\x81\xd7\x82E\xc5\xbcH,\x0cp\xbd \xda\xbf\xdb\x98x\x19\x9d\xcb\xc1\xbc0,\x0cp\xbd \xda\xbf\xf7\x87\x821\xa1\x93\xba<\x1e,\x0cp\xbd \xda\xbf\x9aD\xdbW\x02\xb5\xc1<3,\x0cp\xbd \xda\xbf(\xc9\xba\x1a\xb8+\xbd<H,\x0cp\xbd \xda\xbf'
|
175 |
-
p75
|
176 |
-
tp76
|
177 |
-
bsS'_mean'
|
178 |
-
p77
|
179 |
-
g62
|
180 |
-
(g63
|
181 |
-
(I0
|
182 |
-
tp78
|
183 |
-
g65
|
184 |
-
tp79
|
185 |
-
Rp80
|
186 |
-
(I1
|
187 |
-
(I12
|
188 |
-
tp81
|
189 |
-
g72
|
190 |
-
I00
|
191 |
-
S'\xeb\xbdE\xceL=\xfb?\xa4gE\xd3\xec\xeeV\xc0\x14\xd2\xc3Y\x0b=@\xc0\xac\xf4\xdcda A\xc0\x92$ o\xb1x?\xc0[\xeb\xc4K\xd0[A@I8K\xa9\x16o(@\xb9\x08b\xd9\xfa\x91W@\xa3F\xe9\x92.\x908@n\xd1\x9ap}p@@\xff\x894n9w9@\xa6\t\xbcc\x08\xf2A\xc0'
|
192 |
-
p82
|
193 |
-
tp83
|
194 |
-
bsS'template_instance'
|
195 |
-
p84
|
196 |
-
g0
|
197 |
-
(cmenpo.shape.pointcloud
|
198 |
-
PointCloud
|
199 |
-
p85
|
200 |
-
g2
|
201 |
-
Ntp86
|
202 |
-
Rp87
|
203 |
-
(dp88
|
204 |
-
S'points'
|
205 |
-
p89
|
206 |
-
g62
|
207 |
-
(g63
|
208 |
-
(I0
|
209 |
-
tp90
|
210 |
-
g65
|
211 |
-
tp91
|
212 |
-
Rp92
|
213 |
-
(I1
|
214 |
-
(I6
|
215 |
-
I2
|
216 |
-
tp93
|
217 |
-
g72
|
218 |
-
I00
|
219 |
-
S'\xeb\xbdE\xceL=\xfb?\xa4gE\xd3\xec\xeeV\xc0\x14\xd2\xc3Y\x0b=@\xc0\xac\xf4\xdcda A\xc0\x92$ o\xb1x?\xc0[\xeb\xc4K\xd0[A@I8K\xa9\x16o(@\xb9\x08b\xd9\xfa\x91W@\xa3F\xe9\x92.\x908@n\xd1\x9ap}p@@\xff\x894n9w9@\xa6\t\xbcc\x08\xf2A\xc0'
|
220 |
-
p94
|
221 |
-
tp95
|
222 |
-
bsS'_landmarks'
|
223 |
-
p96
|
224 |
-
NsbsbsS'similarity_weights'
|
225 |
-
p97
|
226 |
-
g62
|
227 |
-
(g63
|
228 |
-
(I0
|
229 |
-
tp98
|
230 |
-
g65
|
231 |
-
tp99
|
232 |
-
Rp100
|
233 |
-
(I1
|
234 |
-
(I4
|
235 |
-
tp101
|
236 |
-
g72
|
237 |
-
I00
|
238 |
-
S'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
|
239 |
-
p102
|
240 |
-
tp103
|
241 |
-
bsS'_weights'
|
242 |
-
p104
|
243 |
-
g62
|
244 |
-
(g63
|
245 |
-
(I0
|
246 |
-
tp105
|
247 |
-
g65
|
248 |
-
tp106
|
249 |
-
Rp107
|
250 |
-
(I1
|
251 |
-
(I8
|
252 |
-
tp108
|
253 |
-
g72
|
254 |
-
I00
|
255 |
-
S'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
|
256 |
-
p109
|
257 |
-
tp110
|
258 |
-
bsS'_target'
|
259 |
-
p111
|
260 |
-
g0
|
261 |
-
(g85
|
262 |
-
g2
|
263 |
-
Ntp112
|
264 |
-
Rp113
|
265 |
-
(dp114
|
266 |
-
g89
|
267 |
-
g62
|
268 |
-
(g63
|
269 |
-
(I0
|
270 |
-
tp115
|
271 |
-
g65
|
272 |
-
tp116
|
273 |
-
Rp117
|
274 |
-
(I1
|
275 |
-
(I6
|
276 |
-
I2
|
277 |
-
tp118
|
278 |
-
g72
|
279 |
-
I00
|
280 |
-
S'\xfe\xbdE\xceL=\xfb?\xa3gE\xd3\xec\xeeV\xc0\x12\xd2\xc3Y\x0b=@\xc0\xab\xf4\xdcda A\xc0\x8f$ o\xb1x?\xc0Z\xeb\xc4K\xd0[A@C8K\xa9\x16o(@\xb8\x08b\xd9\xfa\x91W@\xa0F\xe9\x92.\x908@m\xd1\x9ap}p@@\xfd\x894n9w9@\xa5\t\xbcc\x08\xf2A\xc0'
|
281 |
-
p119
|
282 |
-
tp120
|
283 |
-
bsg96
|
284 |
-
NsbsS'global_transform'
|
285 |
-
p121
|
286 |
-
g0
|
287 |
-
(cmenpofit.transform.homogeneous
|
288 |
-
DifferentiableAlignmentSimilarity
|
289 |
-
p122
|
290 |
-
g2
|
291 |
-
Ntp123
|
292 |
-
Rp124
|
293 |
-
(dp125
|
294 |
-
S'_h_matrix'
|
295 |
-
p126
|
296 |
-
g62
|
297 |
-
(g63
|
298 |
-
(I0
|
299 |
-
tp127
|
300 |
-
g65
|
301 |
-
tp128
|
302 |
-
Rp129
|
303 |
-
(I1
|
304 |
-
(I3
|
305 |
-
I3
|
306 |
-
tp130
|
307 |
-
g72
|
308 |
-
I00
|
309 |
-
S'\xfc\xff\xff\xff\xff\xff\xef?s)\xcc\xc7\xdfN\x8e\xbc\x00\x00\x00\x00\x00\x00\xd09\x8fu\xdde\xf1l\x8b<\xfe\xff\xff\xff\xff\xff\xef?\x00\x00\x00\x00\x00\x00\x10:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?'
|
310 |
-
p131
|
311 |
-
tp132
|
312 |
-
bsg111
|
313 |
-
g0
|
314 |
-
(g85
|
315 |
-
g2
|
316 |
-
Ntp133
|
317 |
-
Rp134
|
318 |
-
(dp135
|
319 |
-
g89
|
320 |
-
g62
|
321 |
-
(g63
|
322 |
-
(I0
|
323 |
-
tp136
|
324 |
-
g65
|
325 |
-
tp137
|
326 |
-
Rp138
|
327 |
-
(I1
|
328 |
-
(I6
|
329 |
-
I2
|
330 |
-
tp139
|
331 |
-
g72
|
332 |
-
I00
|
333 |
-
S'\xeb\xbdE\xceL=\xfb?\xa4gE\xd3\xec\xeeV\xc0\x14\xd2\xc3Y\x0b=@\xc0\xac\xf4\xdcda A\xc0\x92$ o\xb1x?\xc0[\xeb\xc4K\xd0[A@I8K\xa9\x16o(@\xb9\x08b\xd9\xfa\x91W@\xa3F\xe9\x92.\x908@n\xd1\x9ap}p@@\xff\x894n9w9@\xa6\t\xbcc\x08\xf2A\xc0'
|
334 |
-
p140
|
335 |
-
tp141
|
336 |
-
bsg96
|
337 |
-
NsbsS'allow_mirror'
|
338 |
-
p142
|
339 |
-
I00
|
340 |
-
sS'_source'
|
341 |
-
p143
|
342 |
-
g134
|
343 |
-
sbsS'model'
|
344 |
-
p144
|
345 |
-
g0
|
346 |
-
(cmenpo.model.pca
|
347 |
-
PCAModel
|
348 |
-
p145
|
349 |
-
g2
|
350 |
-
Ntp146
|
351 |
-
Rp147
|
352 |
-
(dp148
|
353 |
-
S'centred'
|
354 |
-
p149
|
355 |
-
I01
|
356 |
-
sg84
|
357 |
-
g0
|
358 |
-
(g85
|
359 |
-
g2
|
360 |
-
Ntp150
|
361 |
-
Rp151
|
362 |
-
(dp152
|
363 |
-
g89
|
364 |
-
g62
|
365 |
-
(g63
|
366 |
-
(I0
|
367 |
-
tp153
|
368 |
-
g65
|
369 |
-
tp154
|
370 |
-
Rp155
|
371 |
-
(I1
|
372 |
-
(I6
|
373 |
-
I2
|
374 |
-
tp156
|
375 |
-
g72
|
376 |
-
I00
|
377 |
-
S'\xbe[\x86X\xbbp+@\xe5[2\t\x00cV\xc0o{D\x81\xa7+E\xc0\x97r&~f\xf6A\xc0\x04\xcbP\xb8)\x17A\xc0\xcc\xbaUg\xe2\xb5C@$\\\xcas\xc4\x8b2@:\xd9\xb7\xef\x8dPV@\x18t\xb6\xca\x19\x0f7@\xbc\xfd\x81\xbd\xc5pA@\xca\x8ef\x88f25@\x13@\xbcs]\x0bC\xc0'
|
378 |
-
p157
|
379 |
-
tp158
|
380 |
-
bsg96
|
381 |
-
NsbsS'_eigenvalues'
|
382 |
-
p159
|
383 |
-
g62
|
384 |
-
(g63
|
385 |
-
(I0
|
386 |
-
tp160
|
387 |
-
g65
|
388 |
-
tp161
|
389 |
-
Rp162
|
390 |
-
(I1
|
391 |
-
(I8
|
392 |
-
tp163
|
393 |
-
g72
|
394 |
-
I00
|
395 |
-
S'd\x8c-9=\xf2p@#\xa0\x90\xeaD\xbfQ@\xc5@K\x9a\xd93O@\xe50\x84-\xda\x166@\xb9\x03U\x8fb\xd80@\xebg\xa6\xc21\x92#@z8\\\x1e\xd5[ @\xf8\xde\xc6(\xcf\xfb\x19@'
|
396 |
-
p164
|
397 |
-
tp165
|
398 |
-
bsS'_trimmed_eigenvalues'
|
399 |
-
p166
|
400 |
-
g62
|
401 |
-
(g63
|
402 |
-
(I0
|
403 |
-
tp167
|
404 |
-
g65
|
405 |
-
tp168
|
406 |
-
Rp169
|
407 |
-
(I1
|
408 |
-
(I1
|
409 |
-
tp170
|
410 |
-
g72
|
411 |
-
I00
|
412 |
-
S'L\xa2\xee\xf8\xc3X\x0c@'
|
413 |
-
p171
|
414 |
-
tp172
|
415 |
-
bsg61
|
416 |
-
g62
|
417 |
-
(g63
|
418 |
-
(I0
|
419 |
-
tp173
|
420 |
-
g65
|
421 |
-
tp174
|
422 |
-
Rp175
|
423 |
-
(I1
|
424 |
-
(I8
|
425 |
-
I12
|
426 |
-
tp176
|
427 |
-
g72
|
428 |
-
I00
|
429 |
-
S'\xce\x7f&\xb9\xf8H\xb8?\xd0U\x01\xa7\x1f\x90\xda?T\xfazS\x89\xdc\xdf\xbf_y\x1c\xfah$\xb7\xbfj\xadM\xaa\xbb\x1d\xde\xbf\xffK\x98\xe4\xcb^\xa3\xbf\xbe\x1a\x06\xe4\x8c\xee\xbb?`\x14n\x19*S\xc5\xbf\x01 \x92J\xeb1\xd7?\x01F\x0c\xe2]?\xad\xbf\x14a\xebKx\xba\xd9?%l\xba\x8b\xac&\xb0\xbf\xed>HAs\xd2\xc7?\xa1\xa7\xe3\x89\xdfW\xda?\x1e\xdf\xb0\x8e\x8d\x0e\xb6\xbfm\xff\x10UP\xa7\xd2\xbf\xdf9\x1f\xa8\xa3\xdc\xb5?\x1b\xbc|\xe7\x8e\xee\xd6\xbfdg\xcf\xcb\xa0\xbc\xd4?){\x18\xa7\xa9!\xe1?:\xc6\xf7\xec\xb3\xd0\xcf\xbf\xa2\x1a1\x88*c\xc7\xbfn:S\xfc\x05\xb1\xd0\xbf_T\xb9]\xf9N\xbd\xbf\x11~\x90#\x9b4\xe2\xbf1\xa49\x06d\x8e\xd1?(\xc2\r\xed\x86J\xcc?\xa80\xd3\x16\x9b\x86\xc9\xbfE)z$=&\xcd?\xf1"\x13\x97\x17\xc8\xbe\xbfbr\xca=\x05\xbf\xdb\xbf\xffZ\x1f\x0e\xad-\xd1?v]\x9a8\xca\xe6\xce?\xd0\x8d\xdc@\xb7Z\xae\xbf\x13J\xda_t\xfc\xd3?\xbe\x18\x1ev\xcd\xf6\xc4\xbf_u\xd2-@\xa9\xd2\xbfy\xc8\xc4\xf9Z\xea\xcb\xbf\xa6\x84\xe2\x13\x04*b?\x01\x07\r</\xe8\xaa\xbf\x82\xfdP&\x8e\xbd\xbd\xbf\xdd\x8f\x1f\xe0\xe6"\xe3\xbf\x027\xf5\xd5\x81h\xce?1\xbc\x10\x1c\x9a|\x8a?\xc4\xc5\xf7qkH\xa5?\x9e\xben\xb7h\xf4\xd0?\xfa\xb6\n\xac\x02.\xc0?,\xf0)\xce\xd9\xe7\xe2?\xa1da\xba\xfc\xad\xb9?\x97\xc2 \x838\x81\xc6\xbf-\xc7\xb6\xc9\x9c\xfc\xb6\xbf\x8aQH\xa6\x96#\xe4?EjP\x04\xfd\x8f\xab\xbf\x10\xc5\xe3\xe4\xbc\xb3\xcf\xbf\xf4\x81\xf5G/2\xc6\xbfl\x14\x9c\xb8\xf6\xfc\xd2?\xda\xbf\xd8-\xd0\x99\xc3?\xe7k\x9f\xaf\xe4\x9f\xe2\xbf\xda\x1b\xb7\xc5\\G\xb0?`\x90Q8\x80X\xb4?\x1b\xce`\x10\xe5\x08\xbe?8BA\xc2O\x02\xb1\xbf\xe2\xbcMh<\xbe\xe4\xbf\x034\xb5\x88_\xf6\x96?.a@\x1f\x9b%\xe5?\xda9~\x97\xa8\x1d\xb9\xbfaG\x9b\x98\x03\xb7\xc6\xbf\xa7\xa5\xb1\xd4n\xec\xac\xbf\x84\x82a\xe2Y\xa3\xc1\xbf\x0b/\x85\xf4\xf5O\xc4?\x9f\xd1\x01\x17p\xb8\xc7?\x81G\xc1\xf1Wq\xa4?\x86\x96B\x935\xc2\xba?\x1cN\x81\xdb\xe1\xc9\xa2\xbf^E\xe2\x10$\xcb\xbd\xbfZw\xdc\xa8h\xa6\xd9\xbfO\xa5\xb3i\xdf\xc5\xbd?]\x89*\xb7\xe9\xe9\xc9?0v\x14P|\x9f\xc7\xbf\x16\x0c\xa5\xaa)E\x97?#\xf1\xd6\x1f\xb3\xf6\xdf?[GZa\x8e\x02\xd1\xbf\xab\xb3\xd121\xd6\xda\xbf\x0eSg\xcf\xeb\x98\xde?\x1f\xb3\xd3\xd1lD\xc0\xbfPBQ\xab\x9c7\xa7\xbf\xbb\xa9Li\x97\x05{?!5\xf9\xadf\x7f\xcd\xbf\xf2$\x1e\xea\x8b\x04n\xbf\xab\x0c<\x84\xb4_\xd7?kx\x9a\x8d\x1b\xfd\xc2?\xc2\x03\xc9\x83^\x05\x99?\xf9\x03B%W,\xde\xbf\xd9\xf2$\xc7\x12.\xde\xbf\xf8\xaa\x8d\x81\xf2\x9f\xdc?\xd2\x18\x13G\xaf\xe4\xd6?'
|
430 |
-
p177
|
431 |
-
tp178
|
432 |
-
bsg77
|
433 |
-
g62
|
434 |
-
(g63
|
435 |
-
(I0
|
436 |
-
tp179
|
437 |
-
g65
|
438 |
-
tp180
|
439 |
-
Rp181
|
440 |
-
(I1
|
441 |
-
(I12
|
442 |
-
tp182
|
443 |
-
g72
|
444 |
-
I00
|
445 |
-
S'\xeb\xbdE\xceL=\xfb?\xa4gE\xd3\xec\xeeV\xc0\x14\xd2\xc3Y\x0b=@\xc0\xac\xf4\xdcda A\xc0\x92$ o\xb1x?\xc0[\xeb\xc4K\xd0[A@I8K\xa9\x16o(@\xb9\x08b\xd9\xfa\x91W@\xa3F\xe9\x92.\x908@n\xd1\x9ap}p@@\xff\x894n9w9@\xa6\t\xbcc\x08\xf2A\xc0'
|
446 |
-
p183
|
447 |
-
tp184
|
448 |
-
bsS'n_samples'
|
449 |
-
p185
|
450 |
-
I3148
|
451 |
-
sS'_n_active_components'
|
452 |
-
p186
|
453 |
-
I8
|
454 |
-
sbsbasS'reference_shape'
|
455 |
-
p187
|
456 |
-
g0
|
457 |
-
(g85
|
458 |
-
g2
|
459 |
-
Ntp188
|
460 |
-
Rp189
|
461 |
-
(dp190
|
462 |
-
g89
|
463 |
-
g62
|
464 |
-
(g63
|
465 |
-
(I0
|
466 |
-
tp191
|
467 |
-
g65
|
468 |
-
tp192
|
469 |
-
Rp193
|
470 |
-
(I1
|
471 |
-
(I6
|
472 |
-
I2
|
473 |
-
tp194
|
474 |
-
g72
|
475 |
-
I00
|
476 |
-
S"\xe1P\xbe\x86\x923x@ft\xc0\x89\xe9Mw@\xe5\x85\x92\xd3\xeb\x05v@\xa1\xacg\xaf?\x02{@\xa9\x1b\xaa\x8fV\x19v@jk\x0bsSo\x7f@\x00x\x84s\x84\xedx@\xa0\x88\xaf\xe9\xe6\x9d\x81@!\xe0[\x12\x17\xb8y@6q\x1b'kL\x7f@\x1eE\xfaF\xd2\xc1y@\xe6\xa8?\xcd\x08\xdez@"
|
477 |
-
p195
|
478 |
-
tp196
|
479 |
-
bsg96
|
480 |
-
Nsbsg46
|
481 |
-
(lp197
|
482 |
-
g47
|
483 |
-
asg49
|
484 |
-
g32
|
485 |
-
sg44
|
486 |
Nsb.
|
|
|
+ [lines 1-486, the re-added pickle (protocol 0): a menpofit.clm.base.CLM with opt = {ablation: (True, True), verbose: False, rho2: 20, sigRate: 0.25, ratio2: 0.08, smooth: True, dataset: 'demo', ratio1: 0.12, pdm_rho: 20, sigOffset: 25, kernel_covariance: 10, numIter: 5}; shape model class OrthoPDM, scales = [1], diagonal = 200, holistic_features = no_op, patch_shape = (8, 8); one FcnFilterExpertEnsemble (cosine_mask = True, context_shape = (8, 8), response_covariance = 3, patch_normalisation = no_op); a PCAModel over a 6-point (12-dim) shape with n_samples = 3148 and 8 active components, plus the same 6x2 reference_shape]
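These model files are Python 2, protocol-0 pickles of menpofit CLM objects, which is why the diff renders them as text opcodes (ccopy_reg, _reconstructor, ...) interleaved with packed float bytes. A minimal sketch for inspecting and reloading one under Python 3 — assuming menpo/menpofit are importable so the class references in the stream resolve; pickletools needs nothing beyond the standard library:

    import pickle
    import pickletools

    # Path of one of the committed model files in this repo.
    MODEL_PATH = "MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_mouth"

    # Protocol-0 pickles are plain-text opcodes; pickletools prints the
    # stream without importing any of the pickled classes.
    with open(MODEL_PATH, "rb") as f:
        pickletools.dis(f.read())

    # Rebuilding the actual CLM object requires menpofit on the import path.
    # encoding='latin1' lets Python 3 decode the Python 2 byte strings that
    # hold the packed float64 arrays (components, means, eigenvalues).
    with open(MODEL_PATH, "rb") as f:
        clm = pickle.load(f, encoding="latin1")
    print(type(clm))  # expected: menpofit.clm.base.CLM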
MakeItTalk/thirdparty/face_of_art/pdm_clm_models/clm_models/basic_mouth
CHANGED
@@ -1,490 +1,490 @@
- [lines 1-433 of 490, the removed pickle (protocol 0; the rest of this hunk is cut off): a menpofit.clm.base.CLM whose opt dict additionally carries a machine-specific imgDir = '/Users/arik/Desktop/artistic_faces/applications/AF_sample' alongside the same settings as above (ablation (True, True), verbose False, rho2 20, sigRate 0.25, ratio2 0.08, dataset 'demo', ratio1 0.12, smooth True, pdm_rho 20, sigOffset 25, kernel_covariance 10, numIter 5); OrthoPDM shape model over a 20-point (40-dim) mouth shape, scales = [1], diagonal = 200, no_op features, patch_shape = (8, 8), FcnFilterExpertEnsemble experts; PCAModel with 36 eigenvalues and a 36x40 component matrix]
|
434 |
-
p179
|
435 |
-
tp180
|
436 |
-
bsg79
|
437 |
-
g64
|
438 |
-
(g65
|
439 |
-
(I0
|
440 |
-
tp181
|
441 |
-
g67
|
442 |
-
tp182
|
443 |
-
Rp183
|
444 |
-
(I1
|
445 |
-
(I40
|
446 |
-
tp184
|
447 |
-
g74
|
448 |
-
I00
|
449 |
-
S'+\xc4\xbcx\x90\x9b\x19\xc0\xf7\xc2:\x7f<\x0cV\xc0<%\x19\x9a\x87\xee4\xc0\x0c\xc6r\xc3^\x86K\xc0R`B\xfat?<\xc0nI\xd2\x0f\x93\xbc6\xc0\xe3\x99\xe1U@\x916\xc0\x99\'U\x8e\x02\xf9\xf8\xbf\x08!\x9c\xee\x16\x9f<\xc0Au\x8a}\xc3l6@wN\xe1D)\x845\xc0E\xa7\xb6\x90;\xc7K@R!\xe1K\x10\xf4"\xc0\xc5\xfa\xdd\x11D\xf3U@\x81\x8c\xc0\x9c\x9fN5@\x90w\t\x03\xf1\x8aL@p\x9f\xdc\x1e\x19\x87A@g\xf04\xe9h\x989@\x1b\x9a~\xd2}\nC@B\xebG\x95\x95\x95\xb4\xbfp\xa4q\x1c\xfb\x04B@a]\xfbU6\x977\xc0\xb5\xa4|\xd9\xf5n7@\x86=\xd3)\x16\xc5K\xc0\xb8z\x156\xb2\xf8\x14\xc0n\x16\xaf\xf0-\x9aR\xc0\x8e\xda=c\x88\xa8 \xc0\x07\xa2\x8d\xd5\xcc\xc66\xc0\x17PH\xd2\xae\x11\x19\xc0\xccP\x89\x14\x0b\xca\xf2\xbf@\xfd}\xef\x881"\xc0h\xeb\xb8\xfd7\x0f7@\xcf\x1f<\x11;u\x1e\xc0\x05\xc1"\xb8\xb5\x81R@\x0fV\xe42E\x7f\x15@}\x8d\x1f+\x13\x987@\xd6 yRQK!@(\x17W\x12\xec\x89\xef\xbf\x02\x8fH\'\x96\x80\x19@%n69x\x0c7\xc0'
|
450 |
-
p185
|
451 |
-
tp186
|
452 |
-
bsS'n_samples'
|
453 |
-
p187
|
454 |
-
I3148
|
455 |
-
sS'_n_active_components'
|
456 |
-
p188
|
457 |
-
I36
|
458 |
-
sbsbasS'reference_shape'
|
459 |
-
p189
|
460 |
-
g0
|
461 |
-
(g87
|
462 |
-
g2
|
463 |
-
Ntp190
|
464 |
-
Rp191
|
465 |
-
(dp192
|
466 |
-
g91
|
467 |
-
g64
|
468 |
-
(g65
|
469 |
-
(I0
|
470 |
-
tp193
|
471 |
-
g67
|
472 |
-
tp194
|
473 |
-
Rp195
|
474 |
-
(I1
|
475 |
-
(I20
|
476 |
-
I2
|
477 |
-
tp196
|
478 |
-
g74
|
479 |
-
I00
|
480 |
-
S'\x1f\xd0\xa4\xc6\x16\x9dx@\xff\xe4\xc0F\x8f\xbek@\x92\x83\x8a\xda\xe1\xaew@\x9dy\rz\x0b\x0ep@\x1aM\x8e\xbd\x81:w@D\x89\xe3\x88*5r@\x89\x1dp1\x10\x98w@\xaa\xb7\x08\xaf\xdf\x9es@!\x96&j\x9c3w@\xd2\x9f\x96\x8af9u@xn\xba\xf6d\xa4w@\xbd`\xfcA\x0flw@9a\xf83\xc9ix@\xe9\xd4\x98\x81\x9e\x8ay@\xc9\xa0\x97lP\x88z@\xb9\x0f\xdbEQ\x85w@5\xa4u\xbd\xd0z{@n\x1b-R"nu@\x11\xbcJ%\xb2\xae{@\xf3\xd6K/\xe8\xb6s@\x18y0\xb1)\x8b{@\xe1\xfa\xfc\x8a\x1b&r@\x92\x05\x957&\xacz@=\xd8\xb9\x934\x05p@\xb6\xd5\xdc\xfdZ\xb4x@\x13\xe0D\x0f6\x87m@O\x9fW5\x82\x82x@\xbbX\x7f\xf2W4r@I\x9f\xccL\x0b\xa5x@\x92/\nC\x19\xa5s@\x94\xd8\xe66\xeatx@~\x1b\x14A\xb5Cu@#jk\x91\xa0\x8cx@B]\x13\x1cl\xa6x@x\x8ec\xceW\x8dy@\x11\x1e\xbe\xd3{Mu@K\xeb\xad\x00\x99\xc5y@\x11\x82\xb4GP\xa8s@\xfe\xda\xd7x\xb6\x9ey@5*0}\x03/r@'
|
481 |
-
p197
|
482 |
-
tp198
|
483 |
-
bsg98
|
484 |
-
Nsbsg48
|
485 |
-
(lp199
|
486 |
-
g49
|
487 |
-
asg51
|
488 |
-
g34
|
489 |
-
sg46
|
490 |
Nsb.
|
|
|
1 |
+
ccopy_reg
|
2 |
+
_reconstructor
|
3 |
+
p0
|
4 |
+
(cmenpofit.clm.base
|
5 |
+
CLM
|
6 |
+
p1
|
7 |
+
c__builtin__
|
8 |
+
object
|
9 |
+
p2
|
10 |
+
Ntp3
|
11 |
+
Rp4
|
12 |
+
(dp5
|
13 |
+
S'opt'
|
14 |
+
p6
|
15 |
+
(dp7
|
16 |
+
S'ablation'
|
17 |
+
p8
|
18 |
+
(I01
|
19 |
+
I01
|
20 |
+
tp9
|
21 |
+
sS'verbose'
|
22 |
+
p10
|
23 |
+
I00
|
24 |
+
sS'rho2'
|
25 |
+
p11
|
26 |
+
I20
|
27 |
+
sS'sigRate'
|
28 |
+
p12
|
29 |
+
F0.25
|
30 |
+
sS'ratio2'
|
31 |
+
p13
|
32 |
+
F0.08
|
33 |
+
sS'imgDir'
|
34 |
+
p14
|
35 |
+
S'/Users/arik/Desktop/artistic_faces/applications/AF_sample'
|
36 |
+
p15
|
37 |
+
sS'dataset'
|
38 |
+
p16
|
39 |
+
S'demo'
|
40 |
+
p17
|
41 |
+
sS'ratio1'
|
42 |
+
p18
|
43 |
+
F0.12
|
44 |
+
sS'smooth'
|
45 |
+
p19
|
46 |
+
I01
|
47 |
+
sS'pdm_rho'
|
48 |
+
p20
|
49 |
+
I20
|
50 |
+
sS'sigOffset'
|
51 |
+
p21
|
52 |
+
I25
|
53 |
+
sS'kernel_covariance'
|
54 |
+
p22
|
55 |
+
I10
|
56 |
+
sS'numIter'
|
57 |
+
p23
|
58 |
+
I5
|
59 |
+
ssS'_shape_model_cls'
|
60 |
+
p24
|
61 |
+
(lp25
|
62 |
+
cmenpofit.modelinstance
|
63 |
+
OrthoPDM
|
64 |
+
p26
|
65 |
+
asS'max_shape_components'
|
66 |
+
p27
|
67 |
+
(lp28
|
68 |
+
NasS'scales'
|
69 |
+
p29
|
70 |
+
(lp30
|
71 |
+
I1
|
72 |
+
asS'diagonal'
|
73 |
+
p31
|
74 |
+
I200
|
75 |
+
sS'holistic_features'
|
76 |
+
p32
|
77 |
+
(lp33
|
78 |
+
cmenpo.feature.features
|
79 |
+
no_op
|
80 |
+
p34
|
81 |
+
asS'patch_shape'
|
82 |
+
p35
|
83 |
+
(lp36
|
84 |
+
(I8
|
85 |
+
I8
|
86 |
+
tp37
|
87 |
+
asS'expert_ensemble_cls'
|
88 |
+
p38
|
89 |
+
(lp39
|
90 |
+
cmenpofit.clm.expert.ensemble
|
91 |
+
FcnFilterExpertEnsemble
|
92 |
+
p40
|
93 |
+
asS'expert_ensembles'
|
94 |
+
p41
|
95 |
+
(lp42
|
96 |
+
g0
|
97 |
+
(g40
|
98 |
+
g2
|
99 |
+
Ntp43
|
100 |
+
Rp44
|
101 |
+
(dp45
|
102 |
+
S'sample_offsets'
|
103 |
+
p46
|
104 |
+
NsS'cosine_mask'
|
105 |
+
p47
|
106 |
+
I01
|
107 |
+
sS'context_shape'
|
108 |
+
p48
|
109 |
+
(I8
|
110 |
+
I8
|
111 |
+
tp49
|
112 |
+
sg35
|
113 |
+
g37
|
114 |
+
sS'response_covariance'
|
115 |
+
p50
|
116 |
+
I3
|
117 |
+
sS'patch_normalisation'
|
118 |
+
p51
|
119 |
+
g34
|
120 |
+
sS'_icf'
|
121 |
+
p52
|
122 |
+
Nsbasg47
|
123 |
+
I01
|
124 |
+
sS'shape_models'
|
125 |
+
p53
|
126 |
+
(lp54
|
127 |
+
g0
|
128 |
+
(g26
|
129 |
+
g2
|
130 |
+
Ntp55
|
131 |
+
Rp56
|
132 |
+
(dp57
|
133 |
+
S'similarity_model'
|
134 |
+
p58
|
135 |
+
g0
|
136 |
+
(cmenpofit.modelinstance
|
137 |
+
_SimilarityModel
|
138 |
+
p59
|
139 |
+
g2
|
140 |
+
Ntp60
|
141 |
+
Rp61
|
142 |
+
(dp62
|
143 |
+
S'_components'
|
144 |
+
p63
|
145 |
+
cnumpy.core.multiarray
|
146 |
+
_reconstruct
|
147 |
+
p64
|
148 |
+
(cnumpy
|
149 |
+
ndarray
|
150 |
+
p65
|
151 |
+
(I0
|
152 |
+
tp66
|
153 |
+
S'b'
|
154 |
+
p67
|
155 |
+
tp68
|
156 |
+
Rp69
|
157 |
+
(I1
|
158 |
+
(I4
|
159 |
+
I40
|
160 |
+
tp70
|
161 |
+
cnumpy
|
162 |
+
dtype
|
163 |
+
p71
|
164 |
+
(S'f8'
|
165 |
+
p72
|
166 |
+
I0
|
167 |
+
I1
|
168 |
+
tp73
|
169 |
+
Rp74
|
170 |
+
(I3
|
171 |
+
S'<'
|
172 |
+
p75
|
173 |
+
NNNI-1
|
174 |
+
I-1
|
175 |
+
I0
|
176 |
+
tp76
|
177 |
+
bI00
|
178 |
+
S'\xc0I\xb4\xb5\xa8\xc6\x9c\xbf\xd5J\xf4m\x93\xc6\xd8\xbf\xfe\xda\xff\x17\x85\x85\xb7\xbfQ\x02\x84\xbb0\xee\xce\xbf\xd0\xa6w]-\xbe\xbf\xbf\x9c\xd6FO\xbb\x8c\xb9\xbf\xdf\xd9\xdd^\x0c\\\xb9\xbf+M\x9f\x10\xfe\x0f|\xbf\x04pM0\xd2\x14\xc0\xbf\x925\x9b\xd1\x0b3\xb9?\xb8\xf6Y,\xaa-\xb8\xbf\x9a\xb4{\xfb\x137\xcf?\xb9\xc7\x9c\tdL\xa5\xbf\xe5\xc2\x8a\x17\x84\xaa\xd8?\xd7\xb4\xba\xc4\x80\xf1\xb7?\xc2\xd1\xcf$\x80\t\xd0?\x12\xd2\x9a\xfbD\xb2\xc3?;\x8e\x95;\x1d\xc3\xbc?V\xad\xb2\xf0\x97e\xc5?\xc9\xaa\x0f\xd4\x91!7\xbf\x9d\xb1\xbc+\xba?\xc4?\xefy\x89\xb1k\x82\xba\xbf\xc6\x8c\xdaE0U\xba?\x81\xea\xe5\x9a\xaa4\xcf\xbf\xbc\x87\xc0\xb6\xf1\x90\x97\xbf(\xe8\xdf\xb2b\xe7\xd4\xbf\xd0\x1a\x17\xbd*\xb8\xa2\xbf\xb4\x88\x0c\xf88\x98\xb9\xbf\xadCH\xd1\xb7+\x9c\xbf\x0c%]\xdf+\x1du\xbf\xffF\xa31\xcbq\xa4\xbf\xd3\xa1y\xe7\x99\xe9\xb9?\x85\x94=\xdb\xf9\x1c\xa1\xbf\x8e\xc1\x11n\xe3\xcb\xd4?C\xb5l0+(\x98?\xc6IN\xd9c\x83\xba?\xf9\x1a[\xb4\x17o\xa3?\xd6\x92CNp\xb8q\xbf\x91\xdfE\xe6W\xa8\x9c?\'\xe3\xe7\x0f\x83\xe6\xb9\xbf\xd7J\xf4m\x93\xc6\xd8?\x9eI\xb4\xb5\xa8\xc6\x9c\xbfQ\x02\x84\xbb0\xee\xce?\x00\xdb\xff\x17\x85\x85\xb7\xbf\x9c\xd6FO\xbb\x8c\xb9?\xd1\xa6w]-\xbe\xbf\xbf(M\x9f\x10\xfe\x0f|?\xdf\xd9\xdd^\x0c\\\xb9\xbf\x925\x9b\xd1\x0b3\xb9\xbf\x03pM0\xd2\x14\xc0\xbf\x99\xb4{\xfb\x137\xcf\xbf\xb8\xf6Y,\xaa-\xb8\xbf\xe5\xc2\x8a\x17\x84\xaa\xd8\xbf\xb8\xc7\x9c\tdL\xa5\xbf\xc2\xd1\xcf$\x80\t\xd0\xbf\xd7\xb4\xba\xc4\x80\xf1\xb7?;\x8e\x95;\x1d\xc3\xbc\xbf\x11\xd2\x9a\xfbD\xb2\xc3?P\xaa\x0f\xd4\x91!7?U\xad\xb2\xf0\x97e\xc5?\xefy\x89\xb1k\x82\xba?\x9e\xb1\xbc+\xba?\xc4?\x81\xea\xe5\x9a\xaa4\xcf?\xc6\x8c\xdaE0U\xba?(\xe8\xdf\xb2b\xe7\xd4?\xbd\x87\xc0\xb6\xf1\x90\x97\xbf\xb4\x88\x0c\xf88\x98\xb9?\xd1\x1a\x17\xbd*\xb8\xa2\xbf\x0b%]\xdf+\x1du?\xadCH\xd1\xb7+\x9c\xbf\xd3\xa1y\xe7\x99\xe9\xb9\xbf\xffF\xa31\xcbq\xa4\xbf\x8f\xc1\x11n\xe3\xcb\xd4\xbf\x85\x94=\xdb\xf9\x1c\xa1\xbf\xc6IN\xd9c\x83\xba\xbfD\xb5l0+(\x98?\xd6\x92CNp\xb8q?\xf9\x1a[\xb4\x17o\xa3?&\xe3\xe7\x0f\x83\xe6\xb9?\x91\xdfE\xe6W\xa8\x9c?\xd0\xed\xbf\xc5%\x9f\xcc\xbf\x08r\x97l\x01,\xa0<\xd9\xed\xbf\xc5%\x9f\xcc\xbf\xd6]\x7f+[Z\x88<\xd7\xed\xbf\xc5%\x9f\xcc\xbf$\xc2\xce\xb8\x10\x1dz\xbc\xd7\xed\xbf\xc5%\x9f\xcc\xbf\x8bv\xc0\xdb^\xdf\x89\xbc\xda\xed\xbf\xc5%\x9f\xcc\xbf\xa6\xa4\x1a\xea\xd8\x83\x9b\xbc\xdb\xed\xbf\xc5%\x9f\xcc\xbfXk\x00C\xe3\x10\xa3\xbc\xdf\xed\xbf\xc5%\x9f\xcc\xbfM\x05_\\\xf6\x0f\xa6\xbc\xde\xed\xbf\xc5%\x9f\xcc\xbf\n\x0cSg9a\x8d\xbc\xde\xed\xbf\xc5%\x9f\xcc\xbf\x9b\xfb\xee\xcf\xbb\xd7\x85<\xdb\xed\xbf\xc5%\x9f\xcc\xbfg6\t\x98\x9eM\x97<\xda\xed\xbf\xc5%\x9f\xcc\xbf\'R\x8b\xa6\x07\x12\xa0<\xd8\xed\xbf\xc5%\x9f\xcc\xbf\xa7L\xb9\xeda^\xa2<\xd4\xed\xbf\xc5%\x9f\xcc\xbf\x08\xfaJ\xff\xa0v\x9d<\xd7\xed\xbf\xc5%\x9f\xcc\xbf\x84>E\xae+\xe5t<\xd9\xed\xbf\xc5%\x9f\xcc\xbf\x1d\xb3\x9fq\x04\tj\xbc\xda\xed\xbf\xc5%\x9f\xcc\xbf7\xcc\x1bE3\xdb\x8d\xbc\xde\xed\xbf\xc5%\x9f\xcc\xbfx\\\x95\x05\xac\xb5\xa2\xbc\xdd\xed\xbf\xc5%\x9f\xcc\xbfL\x00F\x0e\x8c`~\xbc\xda\xed\xbf\xc5%\x9f\xcc\xbf+HV}\xa2\tw<\xd6\xed\xbf\xc5%\x9f\xcc\xbfs\x16[\xd0\x04\xa2\x8b<\xb9n&\xb8\xfc\xb1\x9c\xbc\xd6\xed\xbf\xc5%\x9f\xcc\xbf\x1fv|\xc5\t=y\xbc\xd3\xed\xbf\xc5%\x9f\xcc\xbf\t\x8f\xf37"#{<\xd7\xed\xbf\xc5%\x9f\xcc\xbf\xb3\x86o?\xc2\xee\x86<\xd9\xed\xbf\xc5%\x9f\xcc\xbfY\xa3j[\x19\xd3\x97<\xdc\xed\xbf\xc5%\x9f\xcc\xbf\xeb\xe3\xdb0\xd3\x99\x9e<\xdc\xed\xbf\xc5%\x9f\xcc\xbf\xf2\xa8\xd8P\xc6V\xa3<\xe0\xed\xbf\xc5%\x9f\xcc\xbfo\x83\xf4\xdf`\xf6\x80<\xe1\xed\xbf\xc5%\x9f\xcc\xbfE$\xd3%\xe6\xfc\x84\xbc\xe0\xed\xbf\xc5%\x9f\xcc\xbfjw\xa8\xf2\nx\x95\xbc\xdb\xed\xbf\xc5%\x9f\xcc\xbf\xfd\x17U\xee\xe3\xf1\x9b\xbc\xda\xed
\xbf\xc5%\x9f\xcc\xbf\x7fm\xbeQt\x07\xa1\xbc\xd8\xed\xbf\xc5%\x9f\xcc\xbf\xb3\xa8\xd9N\r\xd7\x99\xbc\xd3\xed\xbf\xc5%\x9f\xcc\xbfqd\x01\xfb\xd9\xcar\xbc\xd7\xed\xbf\xc5%\x9f\xcc\xbf\x98?\xbeC\'5g<\xda\xed\xbf\xc5%\x9f\xcc\xbf!d\xa0\t\x05\xc2\x8e<\xdd\xed\xbf\xc5%\x9f\xcc\xbf\xa5\xe5\x8f1\xdbJ\x9f<\xde\xed\xbf\xc5%\x9f\xcc\xbf\x1a\x0b*\xce\xca\x0ft<\xdd\xed\xbf\xc5%\x9f\xcc\xbf\x7fc\xff\xed\xc5\x9bv\xbc\xdb\xed\xbf\xc5%\x9f\xcc\xbf\xc0 \xeaA(p\x88\xbc\xd8\xed\xbf\xc5%\x9f\xcc\xbf'
|
179 |
+
p77
|
180 |
+
tp78
|
181 |
+
bsS'_mean'
|
182 |
+
p79
|
183 |
+
g64
|
184 |
+
(g65
|
185 |
+
(I0
|
186 |
+
tp80
|
187 |
+
g67
|
188 |
+
tp81
|
189 |
+
Rp82
|
190 |
+
(I1
|
191 |
+
(I40
|
192 |
+
tp83
|
193 |
+
g74
|
194 |
+
I00
|
195 |
+
S'+\xc4\xbcx\x90\x9b\x19\xc0\xf7\xc2:\x7f<\x0cV\xc0<%\x19\x9a\x87\xee4\xc0\x0c\xc6r\xc3^\x86K\xc0R`B\xfat?<\xc0nI\xd2\x0f\x93\xbc6\xc0\xe3\x99\xe1U@\x916\xc0\x99\'U\x8e\x02\xf9\xf8\xbf\x08!\x9c\xee\x16\x9f<\xc0Au\x8a}\xc3l6@wN\xe1D)\x845\xc0E\xa7\xb6\x90;\xc7K@R!\xe1K\x10\xf4"\xc0\xc5\xfa\xdd\x11D\xf3U@\x81\x8c\xc0\x9c\x9fN5@\x90w\t\x03\xf1\x8aL@p\x9f\xdc\x1e\x19\x87A@g\xf04\xe9h\x989@\x1b\x9a~\xd2}\nC@B\xebG\x95\x95\x95\xb4\xbfp\xa4q\x1c\xfb\x04B@a]\xfbU6\x977\xc0\xb5\xa4|\xd9\xf5n7@\x86=\xd3)\x16\xc5K\xc0\xb8z\x156\xb2\xf8\x14\xc0n\x16\xaf\xf0-\x9aR\xc0\x8e\xda=c\x88\xa8 \xc0\x07\xa2\x8d\xd5\xcc\xc66\xc0\x17PH\xd2\xae\x11\x19\xc0\xccP\x89\x14\x0b\xca\xf2\xbf@\xfd}\xef\x881"\xc0h\xeb\xb8\xfd7\x0f7@\xcf\x1f<\x11;u\x1e\xc0\x05\xc1"\xb8\xb5\x81R@\x0fV\xe42E\x7f\x15@}\x8d\x1f+\x13\x987@\xd6 yRQK!@(\x17W\x12\xec\x89\xef\xbf\x02\x8fH\'\x96\x80\x19@%n69x\x0c7\xc0'
|
196 |
+
p84
|
197 |
+
tp85
|
198 |
+
bsS'template_instance'
|
199 |
+
p86
|
200 |
+
g0
|
201 |
+
(cmenpo.shape.pointcloud
|
202 |
+
PointCloud
|
203 |
+
p87
|
204 |
+
g2
|
205 |
+
Ntp88
|
206 |
+
Rp89
|
207 |
+
(dp90
|
208 |
+
S'points'
|
209 |
+
p91
|
210 |
+
g64
|
211 |
+
(g65
|
212 |
+
(I0
|
213 |
+
tp92
|
214 |
+
g67
|
215 |
+
tp93
|
216 |
+
Rp94
|
217 |
+
(I1
|
218 |
+
(I20
|
219 |
+
I2
|
220 |
+
tp95
|
221 |
+
g74
|
222 |
+
I00
|
223 |
+
S'+\xc4\xbcx\x90\x9b\x19\xc0\xf7\xc2:\x7f<\x0cV\xc0<%\x19\x9a\x87\xee4\xc0\x0c\xc6r\xc3^\x86K\xc0R`B\xfat?<\xc0nI\xd2\x0f\x93\xbc6\xc0\xe3\x99\xe1U@\x916\xc0\x99\'U\x8e\x02\xf9\xf8\xbf\x08!\x9c\xee\x16\x9f<\xc0Au\x8a}\xc3l6@wN\xe1D)\x845\xc0E\xa7\xb6\x90;\xc7K@R!\xe1K\x10\xf4"\xc0\xc5\xfa\xdd\x11D\xf3U@\x81\x8c\xc0\x9c\x9fN5@\x90w\t\x03\xf1\x8aL@p\x9f\xdc\x1e\x19\x87A@g\xf04\xe9h\x989@\x1b\x9a~\xd2}\nC@B\xebG\x95\x95\x95\xb4\xbfp\xa4q\x1c\xfb\x04B@a]\xfbU6\x977\xc0\xb5\xa4|\xd9\xf5n7@\x86=\xd3)\x16\xc5K\xc0\xb8z\x156\xb2\xf8\x14\xc0n\x16\xaf\xf0-\x9aR\xc0\x8e\xda=c\x88\xa8 \xc0\x07\xa2\x8d\xd5\xcc\xc66\xc0\x17PH\xd2\xae\x11\x19\xc0\xccP\x89\x14\x0b\xca\xf2\xbf@\xfd}\xef\x881"\xc0h\xeb\xb8\xfd7\x0f7@\xcf\x1f<\x11;u\x1e\xc0\x05\xc1"\xb8\xb5\x81R@\x0fV\xe42E\x7f\x15@}\x8d\x1f+\x13\x987@\xd6 yRQK!@(\x17W\x12\xec\x89\xef\xbf\x02\x8fH\'\x96\x80\x19@%n69x\x0c7\xc0'
|
224 |
+
p96
|
225 |
+
tp97
|
226 |
+
bsS'_landmarks'
|
227 |
+
p98
|
228 |
+
NsbsbsS'similarity_weights'
|
229 |
+
p99
|
230 |
+
g64
|
231 |
+
(g65
|
232 |
+
(I0
|
233 |
+
tp100
|
234 |
+
g67
|
235 |
+
tp101
|
236 |
+
Rp102
|
237 |
+
(I1
|
238 |
+
(I4
|
239 |
+
tp103
|
240 |
+
g74
|
241 |
+
I00
|
242 |
+
S'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
|
243 |
+
p104
|
244 |
+
tp105
|
245 |
+
bsS'_weights'
|
246 |
+
p106
|
247 |
+
g64
|
248 |
+
(g65
|
249 |
+
(I0
|
250 |
+
tp107
|
251 |
+
g67
|
252 |
+
tp108
|
253 |
+
Rp109
|
254 |
+
(I1
|
255 |
+
(I36
|
256 |
+
tp110
|
257 |
+
g74
|
258 |
+
I00
|
259 |
+
S'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
|
260 |
+
p111
|
261 |
+
tp112
|
262 |
+
bsS'_target'
|
263 |
+
p113
|
264 |
+
g0
|
265 |
+
(g87
|
266 |
+
g2
|
267 |
+
Ntp114
|
268 |
+
Rp115
|
269 |
+
(dp116
|
270 |
+
g91
|
271 |
+
g64
|
272 |
+
(g65
|
273 |
+
(I0
|
274 |
+
tp117
|
275 |
+
g67
|
276 |
+
tp118
|
277 |
+
Rp119
|
278 |
+
(I1
|
279 |
+
(I20
|
280 |
+
I2
|
281 |
+
tp120
|
282 |
+
g74
|
283 |
+
I00
|
284 |
+
S'(\xc4\xbcx\x90\x9b\x19\xc0\xf6\xc2:\x7f<\x0cV\xc09%\x19\x9a\x87\xee4\xc0\x0b\xc6r\xc3^\x86K\xc0N`B\xfat?<\xc0mI\xd2\x0f\x93\xbc6\xc0\xdf\x99\xe1U@\x916\xc0\x99\'U\x8e\x02\xf9\xf8\xbf\x04!\x9c\xee\x16\x9f<\xc0@u\x8a}\xc3l6@tN\xe1D)\x845\xc0D\xa7\xb6\x90;\xc7K@N!\xe1K\x10\xf4"\xc0\xc4\xfa\xdd\x11D\xf3U@~\x8c\xc0\x9c\x9fN5@\x8fw\t\x03\xf1\x8aL@m\x9f\xdc\x1e\x19\x87A@f\xf04\xe9h\x989@\x18\x9a~\xd2}\nC@/\xebG\x95\x95\x95\xb4\xbfm\xa4q\x1c\xfb\x04B@`]\xfbU6\x977\xc0\xb1\xa4|\xd9\xf5n7@\x85=\xd3)\x16\xc5K\xc0\xb6z\x156\xb2\xf8\x14\xc0m\x16\xaf\xf0-\x9aR\xc0\x8b\xda=c\x88\xa8 \xc0\x06\xa2\x8d\xd5\xcc\xc66\xc0\x13PH\xd2\xae\x11\x19\xc0\xccP\x89\x14\x0b\xca\xf2\xbf=\xfd}\xef\x881"\xc0g\xeb\xb8\xfd7\x0f7@\xc9\x1f<\x11;u\x1e\xc0\x04\xc1"\xb8\xb5\x81R@\x0cV\xe42E\x7f\x15@|\x8d\x1f+\x13\x987@\xd3 yRQK!@&\x17W\x12\xec\x89\xef\xbf\xfe\x8eH\'\x96\x80\x19@$n69x\x0c7\xc0'
|
285 |
+
p121
|
286 |
+
tp122
|
287 |
+
bsg98
|
288 |
+
NsbsS'global_transform'
|
289 |
+
p123
|
290 |
+
g0
|
291 |
+
(cmenpofit.transform.homogeneous
|
292 |
+
DifferentiableAlignmentSimilarity
|
293 |
+
p124
|
294 |
+
g2
|
295 |
+
Ntp125
|
296 |
+
Rp126
|
297 |
+
(dp127
|
298 |
+
S'_h_matrix'
|
299 |
+
p128
|
300 |
+
g64
|
301 |
+
(g65
|
302 |
+
(I0
|
303 |
+
tp129
|
304 |
+
g67
|
305 |
+
tp130
|
306 |
+
Rp131
|
307 |
+
(I1
|
308 |
+
(I3
|
309 |
+
I3
|
310 |
+
tp132
|
311 |
+
g74
|
312 |
+
I00
|
313 |
+
S'\xfb\xff\xff\xff\xff\xff\xef?D\xbc\xd8\x8bG\xe0k<\x00\x00\x00\x00\x00\x00\xe8\xb9F\x87\xeb\x1b:`_<\xff\xff\xff\xff\xff\xff\xef?\x00\x00\x00\x00\x00\x00\xd0\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?'
|
314 |
+
p133
|
315 |
+
tp134
|
316 |
+
bsg113
|
317 |
+
g0
|
318 |
+
(g87
|
319 |
+
g2
|
320 |
+
Ntp135
|
321 |
+
Rp136
|
322 |
+
(dp137
|
323 |
+
g91
|
324 |
+
g64
|
325 |
+
(g65
|
326 |
+
(I0
|
327 |
+
tp138
|
328 |
+
g67
|
329 |
+
tp139
|
330 |
+
Rp140
|
331 |
+
(I1
|
332 |
+
(I20
|
333 |
+
I2
|
334 |
+
tp141
|
335 |
+
g74
|
336 |
+
I00
|
337 |
+
S'+\xc4\xbcx\x90\x9b\x19\xc0\xf7\xc2:\x7f<\x0cV\xc0<%\x19\x9a\x87\xee4\xc0\x0c\xc6r\xc3^\x86K\xc0R`B\xfat?<\xc0nI\xd2\x0f\x93\xbc6\xc0\xe3\x99\xe1U@\x916\xc0\x99\'U\x8e\x02\xf9\xf8\xbf\x08!\x9c\xee\x16\x9f<\xc0Au\x8a}\xc3l6@wN\xe1D)\x845\xc0E\xa7\xb6\x90;\xc7K@R!\xe1K\x10\xf4"\xc0\xc5\xfa\xdd\x11D\xf3U@\x81\x8c\xc0\x9c\x9fN5@\x90w\t\x03\xf1\x8aL@p\x9f\xdc\x1e\x19\x87A@g\xf04\xe9h\x989@\x1b\x9a~\xd2}\nC@B\xebG\x95\x95\x95\xb4\xbfp\xa4q\x1c\xfb\x04B@a]\xfbU6\x977\xc0\xb5\xa4|\xd9\xf5n7@\x86=\xd3)\x16\xc5K\xc0\xb8z\x156\xb2\xf8\x14\xc0n\x16\xaf\xf0-\x9aR\xc0\x8e\xda=c\x88\xa8 \xc0\x07\xa2\x8d\xd5\xcc\xc66\xc0\x17PH\xd2\xae\x11\x19\xc0\xccP\x89\x14\x0b\xca\xf2\xbf@\xfd}\xef\x881"\xc0h\xeb\xb8\xfd7\x0f7@\xcf\x1f<\x11;u\x1e\xc0\x05\xc1"\xb8\xb5\x81R@\x0fV\xe42E\x7f\x15@}\x8d\x1f+\x13\x987@\xd6 yRQK!@(\x17W\x12\xec\x89\xef\xbf\x02\x8fH\'\x96\x80\x19@%n69x\x0c7\xc0'
|
338 |
+
p142
|
339 |
+
tp143
|
340 |
+
bsg98
|
341 |
+
NsbsS'allow_mirror'
|
342 |
+
p144
|
343 |
+
I00
|
344 |
+
sS'_source'
|
345 |
+
p145
|
346 |
+
g136
|
347 |
+
sbsS'model'
|
348 |
+
p146
|
349 |
+
g0
|
350 |
+
(cmenpo.model.pca
|
351 |
+
PCAModel
|
352 |
+
p147
|
353 |
+
g2
|
354 |
+
Ntp148
|
355 |
+
Rp149
|
356 |
+
(dp150
|
357 |
+
S'centred'
|
358 |
+
p151
|
359 |
+
I01
|
360 |
+
sg86
|
361 |
+
g0
|
362 |
+
(g87
|
363 |
+
g2
|
364 |
+
Ntp152
|
365 |
+
Rp153
|
366 |
+
(dp154
|
367 |
+
g91
|
368 |
+
g64
|
369 |
+
(g65
|
370 |
+
(I0
|
371 |
+
tp155
|
372 |
+
g67
|
373 |
+
tp156
|
374 |
+
Rp157
|
375 |
+
(I1
|
376 |
+
(I20
|
377 |
+
I2
|
378 |
+
tp158
|
379 |
+
g74
|
380 |
+
I00
|
381 |
+
S'\n\x10\xa7&WA$@X\xde\x0f\xf5\xdesU\xc0j*\xf7\x87\x1a\xbf6\xc0\xec\xd1\xdaw\xe1\xa9K\xc0\xf9\xaa\x1aC\xb2pB\xc0k2\xe7r\xc8\'8\xc0\xf6\x90\x08\x1b\xd6\xc8@\xc0\x91\x88g@\xdfK\t\xc0\xf6\xcd\x8d\xf1\xe9\x87B\xc0\x0c\xe9h\xae\xb8\xfb3@\xf1\x0f_\xb4\x7fI6\xc0\xa7\x9dC\xb1\xe7KL@(\xf2(V\xe1}\x14@\x0e\x05n]\x7f\xc8W@b\x15%\xa4b15@A\xbc&\x9a\x04dL@O5\xf38N=?@\xc8\\\xd4\x14\xa6\xe35@\xdb\xa8-\xe7\xa6\xc1@@\x15\xddg\xb6jX\xd0\xbf\x1e\xb3o\x10\xe4\xf2@@S\xfd\xfd\xea,{8\xc0~\xb7\xd8\x9a?\x878@g\x97`\xadH\xc2K\xc0r\xd4i0Y\n @F\x04{\xa6v\xb3R\xc0\xc3x\xac\x11\xcf\xa9\x1b\xc0\xb5\xa4f\x06n\x8a7\xc00\x04\xcf\xbfH\x89"\xc0\xb2&"\xb8\x9b\xe4\xe4\xbf~+V*\x13\xd0"\xc0\xc5j\xf9\xfeu%4@\xe4\xb9\x0e\xf6D\xc6\x19@uk\xa1\xe0p\x85T@S\xec\x0b\x93@R\xd8\xbf\xd9JD\r\xdd=4@wG\xe3\x88\x0c\x00\x10@\xc6\x18\xd9\xf3Vr\xf2\xbfb\x92\xd2\xe1\x93\x7f\xdb?\xac\x0c\xbaa\x15\xfe7\xc0'
|
382 |
+
p159
|
383 |
+
tp160
|
384 |
+
bsg98
|
385 |
+
NsbsS'_eigenvalues'
|
386 |
+
p161
|
387 |
+
g64
|
388 |
+
(g65
|
389 |
+
(I0
|
390 |
+
tp162
|
391 |
+
g67
|
392 |
+
tp163
|
393 |
+
Rp164
|
394 |
+
(I1
|
395 |
+
(I36
|
396 |
+
tp165
|
397 |
+
g74
|
398 |
+
I00
|
399 |
+
S's\xebe\x0f^\x07\x90@q\xb5\xf4|\x96\xc6\x8f@\xb4\xac!`@D\x80@Ud X\xc2\x9fb@\xedv\x8f\xee\xc0R[@v\xe8O\x95\xe6\x9eD@\x9a\xc0\xbc\xcb\xf0\x04A@\xcb\xfb\xfch)\xa5<@~A\xc5\x0bs\x837@\xb7\xf8a\x97\x8aL4@\xe1g\x89\xf7\xf8\xeb/@1{&\xdfD\x94,@MB\xeb\x82?u*@nr\x81\x8d\xd6\xb9!@\xb8\xf6`\xaa\x9f[ @]\xbe\r\xbe\x15?\x1a@\xd7\xcc\xa8\xe7\x87\x8b\x18@:\x98\xc5\x83\xdf\x02\x15@\xfd\xb5(CNV\x12@.\xa4\x8a\xf7\'\x1c\x11@\xb2\xc2\xe8L"\xab\x0e@Aw\xd6\x11\xb9\xb7\n@\xbe\xf1\xae\xb2\xa5\x8c\x04@e\xf0\x99\xffU\x7f\x02@\xf2\xd5X\x92\xb4\xcb\x01@p@\x1f\xf0F\x9e\x00@MR\x15\xd8\x9a(\x00@]\xf5\xbd\xdfy\x04\xff?\x9c3 \xa9\xf3\x9f\xf6?\x81\xab\xf2\\\xef\x8f\xf5?\x86\xd9\xe6\x17\x7f4\xf3?\xfey[b\xe4\xfc\xf0?\nx\xc8\x91F3\xee?\x1etI^0\xba\xea?\x8eh("\x98\xcf\xe0?\xef\xf5\xcdhz\x95\xd8?'
|
400 |
+
p166
|
401 |
+
tp167
|
402 |
+
bsS'_trimmed_eigenvalues'
|
403 |
+
p168
|
404 |
+
g64
|
405 |
+
(g65
|
406 |
+
(I0
|
407 |
+
tp169
|
408 |
+
g67
|
409 |
+
tp170
|
410 |
+
Rp171
|
411 |
+
(I1
|
412 |
+
(I1
|
413 |
+
tp172
|
414 |
+
g74
|
415 |
+
I00
|
416 |
+
S'j\xd0r@\xe9=\xca?'
|
417 |
+
p173
|
418 |
+
tp174
|
419 |
+
bsg63
|
420 |
+
g64
|
421 |
+
(g65
|
422 |
+
(I0
|
423 |
+
tp175
|
424 |
+
g67
|
425 |
+
tp176
|
426 |
+
Rp177
|
427 |
+
(I1
|
428 |
+
(I36
|
429 |
+
I40
|
430 |
+
tp178
|
431 |
+
g74
|
432 |
+
I00
|
433 |
+
S'\x98\x8a\xbb\xd6\x0c!p\xbfCJ_41\x93\xd7?\x7f\xd9J\xdesU\xc4\xbf\xb1\x85/\xd3~\x11\xab?@m\xcdETS\xcb\xbf\xc6ey8\x05O\xb3\xbf\xd0\n\xa2\x9ffA\xcd\xbf\xa1e\xb3-\xdd\xaf\xbd\xbf\x90\x04(\x88\xfc-\xcb\xbf\xdad\x9c_07\xc2\xbf\xf6\xa7\xacU\x07\x89\xc3\xbf"\x91\xa5k\xf9.\xa7\xbf"F\xd1!\x06\x8c\x88\xbf\x0b\xe6\x05:\x86\x8e\xa8?\xbc\x1c\xac\xde\xaeZ\xc6?\xa6\xc4\x06\xb69\x19\x90?\xd3\xac*Y_\xdf\xce?9\xdd\x16\xcd\x89\xb8\xae\xbf\xf0U\x8e{Y1\xce?\xb4\xec\x0cjIb\xa6\xbf0\xbd\xbe\xe6\xfc\x00\xcd?\x12p\xd6@\x0cB\x84\xbf\xaa\xf6\xb69\xa3\x1f\xc3?\x12^Nid\x85\xba?k\xca\xfd\x8a\xef#\x95\xbfw^\xbfOZE\xd5?u\xba\x1f5\xf7O\xc9\xbf\x0f?SD\x880\xa8\xbf\x0f\xba\x89WU\xcc\xca\xbfO\xc1\xc8\xecWD\xb8\xbfE\xf8\xf9\x9c\x04\x13\xc8\xbf\xd9}Y`\xce\x1d\xbf\xbfI/\xfa\x07\xc7\xbc\x99\xbf\x9c\x9d\x96\x93\x8d\x96\xa4?-0(\x88\xc5h\xcb?\xee\x02\x81\xack3\xb8\xbf\x18\xbfS\xcbW\x1a\xc9?\x12\xec\xf6B\x16\xe1\xb2\xbf2\x98\r?>\xa7\xc7?\x01\xb1\x0f\xb4\xa5\xbf\xa2\xbfCjX+\xd2\xf1\xad?mZ8\xdbX\x18\xd2?eCt<\x146\xb6?\x81\xb0\xd1\xd5\x04B\x8b\xbfA\xb6-\x1e\xf5(\xb8?\x13\xc8\xe9Q\xae\xa7\xca\xbf\xd3M\x95\x1cld\xba?H\x16\xab`\xa3\xa0\xca\xbfJ\xe0\x89\tCD\xb7?X\xa2\x12\xc4\xd8\xe9\xc3\xbf\xc1\xc8k\x0e$\x05\xb8?*\x15aA\xbc\x8d\xb0?D\xaauS\xba{\xa2?\xb0{\xdd\x0cI\xcc\xdb?\xb4\xd6\xf5\xc5\x15=\xad\xbf\xa9[\xcd\xf2\xc8Q\xc3?e$\x1f\x0et\x19\xbe\xbf\xadp\xf5\x05\xbf\xb9\xa6\xbf\x00\x12\xe9\xed"y\xc1\xbf\xc6\xb9\x07\xab\xb9\x03\xb9\xbf\xbf^i\x8ez9\xc3\xbfD\xeb\xf7\x85\x95m\xb7\xbf\xd7\xbaHm\x99S\xbc\xbf\xf3\xb3U\xff\xcc\xe6\xb3?\xd2\xb6T8\x94\xf0\xac?G\xd9\xd8\xda\x1e\x8f\xd0?W\xac\x07\x1f\x99\xce\xbb?\x16\x8aU\xf4~\xec\xc5\xbf[U\xf7;\xd3x\xbe?ybA\t\xaa\xf7\xc5\xbf\xda\xf3\x83\xe7\xe4\xde\xbd?\xb9\x00\n\xc0\xf1\xab\xbc\xbf\xecA\xb7\x94\x96*\xa0?F\xb9\xbe\'\x04\x14\xd9?\xac\xec\xf2\x10\xecN\xbe\xbf0n6\x93i\xbb\xb8\xbfg(y>N<\xc2\xbf{\xdeG\xa8\xca\x03\xc3\xbf\x95\x01X\xc9\x96\xdf\xc4\xbf\xa9\xc4\x02\xfb\x932\xc2\xbfy\x83\x96\x93\xa7\xcb\xdb?\x08\x85s\xc0\xef=\xaa\xbf \xf6\x7f\xad\x87\x0f\xa0\xbf\rP5\'\xfc\x1d\xa1\xbfH\x1e\x13\x14h\xad\xd1\xbf\xf5\xfb\xf7\x0b\xab\x81S?-\x91\x87D\x96b\xd0\xbf\xf0\xf4\x07Mh\xa3\x96?0\xd6H\xd7K5\xd1\xbfi\xb9\x88@\\n\x87?.\x1c\xd2s0\x1a\x87\xbfS^\xb8\xdb\xd5@\x92?\x1bXzC\xe8\xa3\xdb?\x0f\xfc1%\x1aH\xa0\xbf"\x80\x0f\xaf/\x98\xbd?t)\xe4\xfd\xa3\xe2\x84\xbf\xe8\xfesU\x89\xe4\xa8\xbf\xe4XK\xeem\xb9\x8e?\xd1\xc4\xedl\x07\xcd\xb1\xbfc\xda\xbb\xf5\x18\xdd\x9e?!\x1e\x96:T\xb4\xa7\xbfmW~\x07u\xdf\x8d?\xd5_d\xf6\xf1\\\xbb?4>\x87\x8e\xac\xe9s?m\xe6\xeb\xe9>\xff\xd3?\xcd\x8dv\xcf\x8fs\x9c?\xbat\xcbh\xff\xa6v\xbf\n\xb8q\x94\x92\x98\x7f?\xbf\xe6\x7f*\xfc\xba\xa0\xbf$\x89\x1d\x9d=u\x98?(V\x9bv\xff\x9b`?g.\x15C>~\x86?\x90%y\xbf\xb3\x96\xd4?\xe3\xf2\xb5\x10\xe1\x89\xb9\xbfJ\x97\xd6\x9bS\xc5\xcb\xbf!!!!\x9e\xf1k?_ \x83qqW\xcf\xbf*s\xb9a]\xf0\x95?5\x8bbQu\xba\xcb\xbf}:\xc0\xe8\x86\xd9\x8c?E\xf3\x82\\1\xdf\xc9\xbf\x9b\xc8H\xf3\xcf\xef\x8a\xbf\x8bg7\xad6:\xab\xbf\x0c\xb9Nu\xea\xab\xac?`w\x8d\x01\xe2^\xb2\xbf\x8e|\xc2\x98k"\xb6?\xd8\xca\xdf\xd9\x87w\x91\xbf\x0eP\x903\xae\xafD?j\xc3\xed\xdc8\xbb\xb1\xbf\xa8\xed 
\x11G\xc2\xba\xbf\xf6\xa0\x97Jm\x18\xaa\xbf\xe5\x96#\x18\xc9K\xb1\xbfx\xe0K`\xf2\x95\xc8\xbf\xfe\x8b\x06\xbb&\xa3\x8a?\xf1\x93\r\xa0D\xd4\xbf?I\xfa\xc4\x07\x8d\x80\xa7\xbf\xce\xc5\xa6\xe4t\xab\xd1?\xa5\xb7\xe5N\xa7\x04\xb1\xbfY_\x8fJX\xd6\xd2?\x86\xa3\xd4h\xa8\x8c\x98?\xae\xed\xed\xdeHO\xd2?F\xf8\xb7\x04\xf3\xf4\xb1?\xb7\x06\xc5\xbd3\x19\xc1?\xf1\xf6\x17OF\xb6\xa9?\x15;\xddI\xc8\n\xc0\xbf\xdb03\xee\xf0n\xae?\x93\xf5\xf8\xdd\xcc\xd3\xcf?\xa6\xab\x0ff.\r\xb7?y\xc2\xb7:#\xbc\xd0?\xf5\xed\xf9\x92\xb4{\x90?\x11\xa4\xd6\xf5w1\xd0?\xc6\x1eh\x98N\xd9\xb5\xbf\x19\xebX\x91o\xe2\xbc\xbf"g\xd9\xa2\xc4\xed\xb0\xbf\xb56\x96XG\x00\xd5\xbf\xbf\xabS}\xe1d\xbb\xbf2\xf0+\xc6\xad\xa3\xd4\xbf\xf9Bf\xb1\rv^?\xfb\xc6\xef\xf9gE\xd5\xbf\xa4\xc3\xb2,\x1b\x96\xb6?`WC\x02E\'\xa0\xbf\x8e\xc8l\xff!\x88\xce?\xce\xd07*\xb0\xb7\xa8\xbfO\x16#+\x93p\xb8\xbf\xd9\x08\xcfd\xf0\x8fz?;Y\xc7\x15c\xbd\xd0\xbf\x83\xe4\xd9e\xb1_\xab?\x98\x1e\xef\x84\x0eP\x80\xbf22\xeby\xe0]Y?\x97\xda\xa4\xe4\xdd\xb4\xd2?\xbfao\xf9\xb5\xd8\x9f\xbf\xa8\xec\x9c\xb1CV\xc0?R\x94\xc7\xf8Y\x97\xa0\xbf\xd0\xa8,\x07\xe2[\xd2\xbf\x16,\xadR\xa2\x07\xb0\xbf6\xbf\xdd7\xfe`\xc0?\xe7\xcf\xfe\x96\x93\xf6\xab?\x92r\r\x19\xd3F\xd2?x\xff\xb1(><\xbe?\x1c\x1f\\AN\xc6\x92\xbf\x93Q\xcb\x1e2\x07\xb1?\x0f\xc2\xf9\x06\xf0g\xd1\xbfk\xe1N\xd1l\xd8\xaa\xbf\xaco,F\xf7A\xbe\xbf&!\xf2Z\x9d\xcc\x88\xbfG\xa9\xe6\xdc\xa6|\xcd?_7\x90\x8a\xa5\x0f\xac?!\xd8\xd4\xd9\xfc\x0c\xd1\xbf6\x8bo\xdbxF\xb8?\x9aC\xbb\xdaV\xb8\x86\xbf\xb1\xbf[|\x98\xc0\xab?\xd0f\xe2=YS\xd2?Y\xabgp\x8a\xd7\x90\xbfv\r\xf8r\xd7h\xd0\xbfk\xcd\x98\x88\xa6\x8e\xb8\xbf\xd9\xd7\xeco\x9fP\xd2?{\x9d\xcc\xa0\x92\xb0\x9f\xbfL\x0e\x98\x0c\x87\xca\x8b\xbf\x0c\xd8m\xe81<\xb7\xbf\n:\xe2\xb9\x06\x17\xd1\xbfDu\x87<\xf5\x87\xc3\xbf\x19\xc8\xd7$\x93\xbf\xb0\xbf0\x9f4\xb3H\x87\xb1\xbf\x8e^t9I\xf6\xcc\xbfs\xd3\xf9\x10\xa2\x89\x8d?V\x08\xadwN\xe9\xd0\xbfP\x80\x92e\xdc$\x91?^={\xc5\xca\xbe\xcb\xbf\x19\x91\xc3\x8b^\xaa\xa4?"a\x8b\x12\x06\x86\xd0\xbf\xacxjPh\xd9\xb6?\xd0\x8d\x99\x1b`\x14\xc7\xbf\xc2\\T\x9b\x97\xfa\xc2?\x16\xb7+\xfb\xa4\xce\xb3\xbf\x92\x8b\xce\x0b\x9c\xaf\xc5?\xeeE\xe2Z\xf7+\xc9?\x18\xeaK\xb4R\xbf\xaa?\xfb\x85\xee\xffj4\xd6?>\xf8\x17\xe1\x15\xecy?\x04Dn\xbf/\xa3\xd8?\xed\xb4}\xc1\xd8\x8c\xa7\xbf"\x0c\xaai$\xc5\xd5?D:R3\x8b\r\xca\xbf\'\xbd\x908\xd3\xab\xc6?\x02\x07\xfeC\x95u\xc1\xbflzj\x1d\xf5Q\xb5\xbfQ^\x19\xa9\xdbj\x95\xbf\xac\xf8\xb4\xe6\x1c\xd6\xbb\xbfn\xdf\xb2\x88\xccR\x8d\xbf\x8d\xb0\x85TWq\xb5\xbf\x85\x84\xaa\x97\x89\xb7\x80\xbf\xea\xeb\x80\xa0g2\xbc\xbfg\xe7\x97\xd3\xf5\x99\xbc?\x8f\x81n\xdb\ns\xb6\xbf\xdet\xcbuP+\xbb?\xf4\xfc\xbez\xa0\xde\xb5?\xe1\xd2\xdc\x15\xf2\x9dp\xbf\x8d\xb7\xa4\xaay\xbb\xc0??\x05^\xf6Y\xb7\xb9\xbf\xb6>\x82\xb7(\xa5\xb9?\xb3/\xbc\x90\xadAp?}7\xd7_5\x05\xbc?\xf7\xaf\xae\xbd[\xfd\xc3\xbf\x0b#\xaaB\x1c\x81\xcc\xbfN\xa2[H\x94\xc2\xc8\xbf\x93\t\x03Fs\xea\xb4?-N\xb4_\xb3\xac\xc9\xbf\x1b\xfd\xe31RU\x85?\xe6\xc9\xb7\xdb>!\xc8\xbf\xae\xc5n\x00\xf9\xaf\xb8\xbf\x1fd\xf2[5\x15\xc7\xbf\xf1\xf8\xe3\x18CZ\xc7?\xf7\\H\xb9\x94\xd6w?\x91-h:l\xc8\xb7\xbf\x0c\xad\xdbJ\x05\xb7\xd1\xbf\x06\xa9\x1d\xda\xf9\xc9\xc1?\x8c\xdd\xb1\x1b\xea8\xa8\xbf\x05\xf7\xab\xaa\xe2\xa7\xb6\xbfE\x95\x19\xac\xd5\xa4\xa3\xbf\t\x0b\xb6\x86\x80\x8b\x89?"\x9d\xe5\xff\xef>\xb0\xbf\x88~J\x05\\G\xb1?etI\xed7\x96\xd3\xbf\x03\xea\\\x19`R\xc4\xbfw\xdbPQ\xf9\x01\xa4?\xc0\xdd\xcc\x0f\xd6\x0c\xa0?Z\x8bv\xd8`\xf3\xd5?Re0\xd8\xd9 \xb7?\xed\x9d\xdd\x18di\xd3??\xf8\xc5`@\xb2\x9d?)P\xc3\xa6]Q\xd5?\xb2\xdd\xdc\xf1\xcaI\xb5\xbf\x989\xb2j\x94\xb0\xa1?\x94\x80 
#$\x9f\xa0\xbfI\xd4<\xf8$X\xca?\x8an\x026!$\xb8\xbfOt\xef\xdb\xf1\xcd\xc9?\xeaX\x99\xc5!\xaa\x95?\x84\x97\xecy\xb8$\xc7?\xc6\xa3\x16\x85\xc8*\xb8?\xe1WA\xae\x1f\xd6\xbc\xbf\xde\xdbw?\x05\x0f\xb2\xbf\xefC\xa2\xd7C\x82\xc5\xbf\xe8\x0c\xd5wU^\xd8?\xeb\xb3\xca$\x18\xa5\xbe\xbfy1`\xe7\x1d\x8c\xc1\xbf\xc7\xf6<$\x82\xf1\xc5\xbfU\xef\xdf\xce\xbb\xfd\x94\xbf\xcd\x08\x8c<X\xaf\xbf\xbf\xff\xfc\xe0\xf9I\x11\xb2?\x8a\x889\xd3Pq\xc3\xbfo\xca\xd5\xc2_h\xd1\xbf>e\xd9\x16k\xb6\xb4\xbf\x14\xff~(\x8a\x91\x89?f\xb2\xebW}6\xce?\x9c\xcb\x84F\xd8H\xc5\xbf*\x19\xeb\xf0vQ\xc3\xbf}\xe3d\x8e\xe5)\xbd?\xff\xac\xfe\xb9\x7f"\xc8\xbf"\xa5U\r\xff\xbc\x92?\x10\x04A\x12\xc4\xeb\xc5\xbf\xdd\xc3\xb5\x1a\xe8\xdb\xb5\xbf>\x0c\xcf>\x9d\xbf\xcb?v\x9f\xc2t\xe6\x8f\xd1?\x19\xaf\x95\x97X\xcf\x85?\xd2\xba}\xd3\xf8?\xc6\xbf& \xee\x96}\x8b\xcb?\x82q\xbd(\x9e\xef\xc3\xbfB\x89\x1a\x7f\x93B\xca?\xe9^\xb1-b\x88\x7f\xbfJ\xa3+]\xb7\x00\xcd?\xd2\x97\x9a\n\xd6\xdb\xbf?\xf6\xcb.\xc7G<{\xbf\xcc\xcc\x8b\xc4\xeaE\xc2?\xf1\x16E1T\xce\xc1?\n~P\xedj\xfe\xbd?\x89\x0b\x8f\x1b@R\xb2?.\xb7\xfe\xff5\xd2\x85\xbf\x87{\x81\x16\xc9\xeb\xbf?\xc8B\xda\x19\xf2\xd9\xc3\xbfz\xff\xd4\xc5\xd8\xa1\xb8\xbf\x90\xbcs\x9e\x0c\xde\xcd?~\x9aA\x16\xb4\xda\xb0?\xbd\xcf\xce":\xec\xcb\xbfx8h-\xb52\x97?\xeb\x82<\xe5\xa9-\xbb?d.fx\x92\xd8\xc1\xbf3\x8d\xa0D\t9\xac?\x1eh\xa9Pz\xb7\xb1?k \xbb\xb4\xf3\x03\xb1\xbfW\xab\x90"P\x02\xb8?\xab\xd7\xde\xec\xdc\xff\xb9\xbf\xdbW\xff\xeeM\xc5\xc6\xbf=\xbb7wK\xcb\xb3\xbf\xe1\xceg3\xd96\xdc?\xd9(\x1bs\xd7\x1f\xbf?\xdfW\xd4gF\x85\xc0\xbfWkw\xd9\xbc\xeb\x8d?\xe9\x98\x03\xb5\x11N\xd2\xbf\xe8\x1a\xfd2\x9e\x1e\xb3?h\xb0\x86I"&\xbc\xbf|5\xc5\x8e\xb4\xad\xba?Yp\x7f\x99\xbe\xbd\xd1?t\xd3\xcd\n,v\xe1\xbf\xbc>\xdb@\x8c\x02\x91?\x94#\xa3g\x8c\x93\xb9?|\xec\x02\x06U0c?\xc3\x9aZ1\xf7\x07\xae?\xfc\x05\xcaN\xef\xe7\x8d\xbf\xd3;h\xac\\\xfb\xaa?\x835\xbf\x15\x0b\x81\xb0?\xc6}\x8f~\x8dJ\x9d?\xf0\xf5(\x8b\xc7\xff\xad\xbfF_\xe0\x83\xee|\xa3?p\n\xa6\xaei\xfd\x9a?q\x1f\x08e:\x99a?\x9d\x01\xb2\x86W!\xc7\xbf\x03\xb8\x95\xf1\x8b#\x97?\x906m\xfc\t\xf8\xbc?\x8e\xfc \xe9\xd4^j\xbf \xc6\xd4\x9d\xc9\x19\xc9\xbfp\x05B\xea\x14\x9e\xa8\xbfg\xe5,p\xf3\xa1\xbc\xbf\x93\xbd\x1d\x14vF\xc8?J\x13\x969\xaf\x90\x8b?\x16*\x1a\xcfOg\x80?\x18\xc7\xd3\xd4\x1b\x94\xa3\xbf\xa5\x88\x04S\xe4\xba\xc2\xbfXCj\xf8\xe9!\xab?\xc3\xcc\xd8-\xa6\xf7\xb9\xbf\xaaS\xc5\x94W1\xc1?\x87\x00:T\x18W\xd8?4j\x86\x9e\xa3\xd7\xbc?\x8c\x06\xa3.\xd7S\xc7\xbf\x0f\xf6}\xfd\xf1\x08\xce\xbf\x0c\x02\x08\xfb\xc7\xee\xda?\xc6\xf9P\xd0\x81\xa9\xbe\xbf\x0es3\xb9MG\xa3\xbfz\x19J\xd8\xc3\x1a\xbf\xbf\x935V_/\xcd\xb2\xbfEX\xe6G\x1c\x9f\x98?\xed\x05)\xf41\xdb\xb7?cBQb\'\x99\xe0?\x06\xe1z\x9c\xa67\xc0?\x84\xa7D\xaa\xf0b\xba\xbf:\x0e\x13\xa2\xac 
\xb4\xbf=\xd7\x91\x8a\xdc\xcd\x97\xbf\x85t\xc7\xaapHg?\x05\xd6\x13\xf0_\xa2\x85\xbf\x88}t\x8d,\x8d\xc1\xbf:`(&\\|\xa0?\xc7\x1a\xac%\x94m\xaf\xbf*\xd4T|G\x89\xb3?n\xf7\xf4\xda?\x9b\xc3\xbf\xf4"\x8b\x88\x98\xde\x8e\xbfw\xeey\xd2\xa4\xfd\xaf\xbfZ\xa1\xfe-\xbd%\xb1\xbf\x19!\xa0\xabZ\x81\xc1\xbfTOm\xe4,\x8c\xb3?\xd5H6%\x9d\xe8f\xbf\xcb\xfb\x15}\xfd\xfd\xcb\xbf\x98d\xce*\xfe\x82\x8d\xbf\x06_*z\x8fCq?\r\xe6~\x00"\x86\xca\xbf\xcbmw\xf0\x1d\xf2\xb3\xbf\xdaS\xf5+\xfc\xf2\xab\xbf\xee\xda\xd1E\xbdV\x8e\xbfg\x90\xc0\xfe^\xb7\x9e?P\xaf\xce?{{\xa4\xbf2\xd6,K\xfd\x9ek?\x15\xf4=\xa7\xbc4\xba?\x9d\x1b5\x19\x0fr\xd3\xbfz\x91q\xd9H|\xc5?.\xe6k\x84t\x89\xd4?\x10\xc91\x84\x12\xbd\xdb\xbf\xb5?mBy\xd9\xd2\xbf)\x86\xfcI\xd2\x14\xcd\xbf\x7f\xe2\xbd\xb0\xe8\x14\xba?!(\xa4\x8a\n\x10\xa4?M\x7f\xf9\x12B\xbc\xbb?\xe2QZ\xc1\xd2\xd7\xc6?\x003A\xca\x0e\xcb\xa3?`\x89\x15\xd0}\xd4\xd3?7\xd4\xe3>!+\xc1\xbf\xdf\x98\xa3\x9dj\xc0\xc3\xbf\x17^U)jj\xc9?\xd3UO\xeck\xa1\xa1?\xc6\xef\x0e\x87\x1f\xde\x82?\xf2,\x8b2\xb6\xc8\x97?4\xd3\x0f\x1c\xb6\xbb\xb5?"\'|\x8c\xca!x?E?\x92\xb6W\xf7\xb0?W\xec\x9e\x91=\xec\xc8?8\xbb\x17\x89&\xfd\xae\xbf\x17\x97o}V\x88\xbd\xbf\xc9Q?M*\r\xae?\x90@\xe0\x8ds\xcf\xb1?\xc0\x12\xca\xce\x8b~\xac?\xda\x82\xf5,\xd8r\xc3? }T\xb9\xbf{}\xbf\xfb\xc5;t\xd7\xd3\xb3?7\x05\x02\xbd\x1b\x96\xde?J\xf2}\x03i\xeb\xa2\xbf\xc2\x83\xe4\xd4\xd0\x8d\xb4\xbf\xcb\xbeQ\xcfLQP?\xb3t\x8e\xa5\xc8ji?\x10\x1b\xab\x0eO(r\xbfC\x97\x9eg\xd8\xe7~?\x02\r\xf3[\xfb\x07\x9d?\x0e\xb9\xb7X#)\x84?a~S\xeeV5\x92?\xc2\xcb\x1f\x98\xc1a\xb4\xbf9\xf6T\x84\x19\xab\xc6?\\}\xd6+P\x0b\xd6\xbf\x82\xe7\xee\xde[!\xc7\xbf\xf1v\xa6\x18\x81\x99\xbc\xbf\xba\xf1\xdd\xf1\xaa<w?e\xcf\x1cc\x00!\x98?I\xe9Y\x11\x1c4\xb6?\x1dE[\x8a\xa1u\x99?P\xd1\xf4:\xb9\x9a\xc1?\x99\rfE\x97W\xa2?Y\xe2y\x1d\x1c\xa2\xb1?\x9b\x83<\xbbV,\x98\xbf\xfc\xcf\xdciA\n\xc1\xbf\x9f{\x13\x97W\xb8\xdf\xbfJ\x8fl\x19\xaf\xc3\x92\xbf\x83\xecW\xc7[7\x84?C\xeaz\xd1|M\x93?\x01\xa6w\x13W\n\x88?\xff~8\x88`\xf0\xa7\xbf \xf5$\x83\x82\xd0\x9f?\xdd\xe4\xe0z\xdd\x84\x91\xbf>{c{.W\xdf?9G\xd5[Q\x92\xb8\xbf%\xdc\x84\x93sN\x87?Lo\xf0pD&\xaa\xbf6\x0e\xc5\xa5\x0e\xc2z\xbf\xdf\xdd`\x92{\xdc\xa1\xbfq\xef^X\xe82v?S<\x8c\xce\x03f\x9d?X\xe0\n\xaa\x80H\xa6?o\x1c_AJ\xaf\xcb\xbf1\xd1\xb5l\x8f\xee\xda\xbf\xcd\xf6\xa3U\xdc\n\xc6?\x9b.\x7f\x19.\x02\xb0\xbf$^\x84X\x0fw\xd1?F\r\xaaB\xe4\x80f\xbfkm\x18\xdf\x1d(\xaa??\x1e\x92\xd6\x0c\xea\xc0?\x95\xef\x8bj>\xab\xde\xbf\x02P\x9f\xc62\xb6\xca?Gk\xbb\xaeR8\xbc?\xe0\xa6\xe1\xdb|\xed\x82?\xbf\x9aP\x9d\'\xbc\xcf?H\xbb\x98\xe5[\xb2\xc5\xbfC\xad\xb2\xb3\x9a\xba\xc1\xbf\xcc8\x97\x98\x98\xa9\xbb\xbf\xd6\xb6\x8852\'\xbb\xbf\xdd<\xf1\xe2\x11\x15\xb3\xbf\xb17\xfe\xea\xdc\xd2\xb1\xbfx\x1c\tx` 
b?=\xd7S3\x83\xc8\xce?AlRy\xdf\xad\xcb?\xb7L\xb0E\xc1\x93\xc3\xbf\xbc\xb6\x87\xd7"K\xae?lP\xb9\xee\x941\xbe?\xd0\xcc\x90\x9c\xb2>\xa8?\xea\x9c+F\x91mq\xbf\x7f\x82!\x8e\x0f\xa7~\xbf\x94\xdbfl\xb5\r\xb0\xbf\xdcN\xed\xec\xff\t\xa2\xbf\x10]\x13\x13Q}\x9a?\x90\x92s\xaa(?\x9c\xbfK\xd0\xa5Q\x17p\xb0\xbf\xb4\xdd\xee`TW\x88\xbf{\xc8\xa5)\x89\xc3\xc0?\xfc\xee\xab\r\xf7n\x9e?st\xac\xb4.\xc2\xba\xbf\xb0\'<\x9e\x10\x07\xc6?VR*\xbb\xc1\xc0\xaf\xbf\xb5%\x97ln-\xb5?h&lK\xb0)\xd7?8\xb0\xa7\xfb\xcc!\xd2\xbfn\x07\x9c\xb8]\x00\xd1\xbf\t\\"\x1f@\xfa\xc1\xbf\x86\xa8\xd8e\xcf\x8f\xb8\xbf\x8aJG\xcd\xd7\xe9\xb4?\xa1\xc9P{k^\xcc\xbf;\xcdB\xcb\xac\x82\xb5?b\xc5\xa1\x87\x18\x9b\xde?R\xb6\xeb\xa7\x06\x10\xd2?G\xd1\\4-\x17\xcc\xbfn\xf7\x0e%\xb8\x13\x9b\xbf\x84\xfd\xf0;\xfa\xf4\xb9?\x91S\xf6\xa1G\xb7\xc7\xbf<$\xe6\xb7V\xdb\xa9\xbf:\xeaF\xa0m\xdd\xb2\xbf\xbe\x960\xbdo_\xa5\xbff\x15h*\xde\x1b\x91?\xe5O\xc8i\x16\xc6\xb2\xbf\xd1?\'7\xe3\xa3\xbb\xbf\xbfd\xd4H\xbf\xb9\xb6?\x17\x8e\x7fn.6\xd4?\x01\x16aOI`\xb5\xbf\xc8!\xbd\x15;T\xb4\xbfE gj\x93J\xaa?\x98\x95m!\xda\x94\xac\xbf\x1dIO\xabcV\xa5\xbf\xf3\xf1\xd0[z\x1b\xb3?\x9b\x9c\x0c2\x8es\xa6?m}\x13\x8fT\xd7\xb4\xbf\xff\xd0\x08\x8d\xaf\\\xaf\xbf\xf8\xd3\xebZ\x1c\x1c\x9e\xbf\xf6\xe2\x07\xd4\x15\t\xa2?\x91h\x87Sh\x86\xa2\xbf\xe6\xbf\xfe\t\xfe\xc8\xc0?\x17\xff\xc9\xce\xa4,\xbb?\xa0k\x1e\x88O=\xb3\xbfP\xab\tg\xb8]\xac?\x032\r\x1f\xaa\x05\xb4\xbf\x15\xa9\x19\xcb\xc6\xda\x8c\xbf.\xee\x18+p(\xc7?\x8b\xd0\xc0\xb3\xb6\xe9\xb2\xbf\x18\x8b8(\x92\xbe\xaa\xbf{F5n:2\xc8?\x9c\xb9`\x8e7\xbe\xa4\xbf\x81\xed\xcc\x17F\xf3\xd8\xbf6|\xae6.\xa5\xc2\xbf8\xad\xc6\x9b\xed(\xc8?&F\xdd\xa0\xea\x1e\xad?2\x14m\xe0\x9ai\xb0\xbf\xe4$\xd1\xcb\xfc\xc2\xa3\xbf\xb5^\x9fC\x83Qz\xbf\xf6 \x06\x83\x88p\xb4?\x99W\x85V\x03\xbd\xb3\xbf\xf1U\x98%\xe4\xa2\xb0\xbf\xbf>\xbf<\x92G\xc3?\x84fV#Th\x8c?\xb2\xc2w!\xe6\xd1\xd4\xbf\xa2\xa9\x84\xf5\xc5\xa8\xad?zi\xac\xe5\x07F\xc9?\xd4\xdd\xfe\xd3\\\xd3\xbb\xbfLt"x}k\x8f?\xa4\xcf}y\xcc\xe3\x96?\xc4\x9cXx\x01\x14t\xbfl\xa2\x9fd\xaf\xbc\xb8?\x99\xc1\xe6\x10\xff\x84\xca?.\xd3\xf1\xd0\x9d`~?\xd1\xee\xc5f:;\xd9\xbf\xdf\x8a"q#6\xaf\xbf\x94\xf7]\x0b\xb5\xcc\xcb?\xf0\x8a\x9f\x18\x03\xe1\xa6?0P\x8f\x13\xb4\x81\x90?\xffmm\xd7\xe5\x8b\x9f\xbfG\xc4\xdd\\\n\x1f\xd0?e\x95\xb3\x02\xea\xc2\x93?cm\xf98\x81\'\xd5\xbf\x1c\'\x89\xbf\x03\xea\xa4?q"\x97]Y\x99\xcf?\xde\x86\xc14\xf0p\xd3\xbfE\xcb\xe1\xdd\r\xcd\xc9\xbf7%A\xc47T\xe2?\xce\x806\xa4y1\xbb?\xbe)\xb4h\t\xc6\xbe?\xb1\x88\xfa\xa1!B\xa2\xbfC0\\\x11\x04\xaf\xb4\xbf\x8a\xfb\xeb"\xad/\xaa?\x06\xbb$S\x94\xd7\xca\xbf\xe1\x132\xc4\x0f\xaa\xa5\xbf\xa0\xd48\x9d\xcc?\xd4\xbf\xe2\x8d\xae\r\xb4|\xa3?\xcd\xc5nh.&\xcd?\xc0Ga\xe4W\xea\xd1\xbf\x18\xbd\x99D\xa6\xd8\x8c?G\x18M\x1cp\xde\xaf?lAK\xfb\\&y\xbf\x9e\x89J\x08\xdf\xb9\xa8\xbf\xa8\xfe\xcb"\xf8\xad\x84\xbf\x90m\x8e\x0e\x10\xab\xa3?\xf3\xf8\x88\x96\x80\xaf\x99?\xb2\x9f\x12;h\xd0\xb3\xbfF\x93\xfd\xfd\xa0`\x93\xbf\xe7( 
\xe2<L\xb0\xbf\xc1m\x17\xf7\xe3\x04\xb3\xbf\xbc\x19.}*\x80\xcc?T\x18\x1b\\i^\xc0?\xe5\xf7pz\x98\xc7\xb3\xbfO\x88\xbam\x96\x82y\xbf\xfd\xfe)vG\x02\x97?q\\\x8cd\xafz\xc2\xbf\x9d\xc2\xd6G\x02\xf2\xb7\xbf\x0f\x00U\x93\xd9\xa8\xb8?\xed\xa2\xbd\xed^\xfd\xd1?v%\x94\xc0z\xfev\xbf4$\xa8N\xc11v?\xfe\xf1\xd8\r\xd5p\x89\xbf\xd0\xeb\xe2\xf6A\xc0\xb9?\x1bo\x15)_\x11s?\x82L\xebD<\xcb\x81\xbf\xa8\xfb@{2\x9c\xb0\xbfLAt/6g\xd2\xbf\x0b\xfc\xca\x8b>\x1c\xd4\xbf}\x92\x080\x8ao\xc1\xbf6\x118\xfe_\xf5\xa8\xbfNI\xf7OW\xda}\xbf\xd2\xc8\x0e)I\xa4\xfb>\xbe\x07m\xa7\x93\xe2t?\xcdh\xa8,\n\xbd\xc3?\x03hh\x18\xf2\x15\xb3?d\xa4\xc0Y\xd4\xc0\xc4?v\xde1\x08\x1f\xf8\x94\xbf\xf2s-\xe0\x00\xd9\xcb\xbfra\x11":\xcc\xd5\xbf\x92Y$\x1d\xc3A\xc1\xbf\x9fy\xfe\xa6\x1bR\xbe\xbf\xe6@\xa9\xe0\xdd\xc9\x90?\x9d\x96]\x0b\xfe#\xab\xbf\xa7\x01\xafEvO\x9d?1\xa8\x1fa\n;\x9a\xbfc`n\xe4\x9c\xa8{?<8\xd8\xa7h\xa3\x9c?r\xab?\xb5\x8c\x11\xb6?&$\xaa\x17\x94\x0f\xa0?R\xae\x80\x15\xc1\xd4\xd3?V\x12\xff\xd1\x1ei\xd5?\xed\xf4\xd211l\xb3\xbf\x08\xce\x01_\xfb)\xb5\xbf,\xac%\xe4\x14T\xb7\xbf^\x0f!\xc2\x03\xe1\xb9\xbfI\x95&\x18_5\xbb?@$\x8e\x82\x14\x1f\xb1\xbf{hE.\x991\xc0?\\P\x8e\xa5\xccG\xdb?\xe2d\xb6\xae\x7f\x88\xc0?Jw%\x8c\xf6h\xba?\xc3Yl\xadr\xee\xa9\xbf)\xbb\x82>oo\xb8?\xa0l\xe3\xfa\xc9k\xc0\xbf{\xdb(JP\x1a\xc2?\x99\xf5\xber\x9dS\xd5\xbf\xaa\x87\xf0\xbb\xe09\xcd?K\x0e\xbf\xa5\x87\xbb\xb5?\x82\x8a\xd0+yQ\xb3\xbf_\x90\xfds\xe1+\xbf?\xcc\x12h\xae\xe88\xac\xbf1\x08&r\x9as\xb2\xbf\x8e3~\xd9\x954\x9d?\x981P\x149\xed\xad?$<\x1b\xc3Q\xf5\xa3?[\x8fl\xfe\xac`\xc9\xbf8M\x88b\x968\xa9\xbfR\xd2e\xc4\xe6\xa7\xd1\xbf\x03\xd1\x8f\xd0z\xff\xbf?\x1b\xe6\xf2~t5\xb5\xbf\x1c\x08\xfd{48\xb2?Y+\xc0.\xa3\xe0\xb3?L\xfdR\x83\x01\x07\xa3\xbf\xd3/\xfd\xd5\xe2\xd8\x95?\xce{\xd1\x1fG\xe1\xa1\xbf\xf8\xa7\x82\xa2\xad\x16Y\xbfY\x84\xffX#I\xb3\xbf\x1a\xa8]&N>\x96\xbfd\xa1\xc4!\xcf\xf9\xba?/\xb8u\xa0\x0f\x9e\xd8?\xbeG\xf9\xb7v\x11\xd1\xbfG\xc1\x0e\xe3@\x08\xb4?\x882\xc9\xd71\x01\xaf?\xe7\xe6\xecR\xcc\xa9\xcf\xbf\xb7dP/\xe3\xd3\xb7?\xbf\x93\xcf\xa4\xdfRw\xbf7\xb0\x85\x93\xf2r\xa6?\xfcA\x8ab~w\xe0?\x1b\x9fF\x9ay4\xc1\xbf\xd2\xebg\xed%\xbe\xbb?\xc2\xbd\x91\xcdOe\xb0\xbf\x1aN\nv\xdb\xfd\xc0\xbf\x08\xd0\xd6,\xa4\x95\x86\xbf\xdf\xc7\xe1W\x94\x1e\xb4\xbfp+\x0f\x1by\x9a\x81?\x95a\xb66\x8b\x83\xb6\xbfc\xa7\xbd\xc0)\xdd\xa3\xbfF\xdc\x06\x87\x9d\xd4\x9c\xbf\x80\x03?s\x85h\xa9\xbf\x04\x84\x0f!\xd5\x91\xc7?\xd0\xf5\x1ae\x89\x85\xc2\xbf\x0bJ\xc9\xffl\x18\xd3\xbf\xbc\xd7\x1cF\xb7\xb5f?]\xea\xfb\xe5\xb7\xba\xc3?\xc7<H\xe0\xdf\xcf\xaf?\x92qk,J]\x8c\xbf\xb6*\x93\x91\xe6K\xd5?\xc2+N\xfat\x04\xb5?\x85w\xf2\x07\x14\x1a\x8c?l\xffp\x03O\x01\xb0?\xef2\xa4\x81\xe1g\xd5\xbf6\x03\xd8\xa0\xa6`y?\x06\xd6\xfe\xfc\xf1\xc4\xc3\xbf\x0f\\sb\xe6\x83o\xbf\t\x9d\xc7}r\x00\xbf?~\x05\x122m\x9e\xc2?\xda\xd9\x1b\x93\xd4\x8a\xd7?S\xcfFExR\xc6\xbf\xb3\x90\xb3\x08O\x9b\xbc?\x1e\x97["\xa4\xac\xb9?\'\xa8\x00Y\x98\xa0\x9b?-\xf3\xd3\xbe\x9a\x1b\x7f\xbfJ\xc7}@F\x00\xbe\xbfK\xbc\x91c\xee-\xa0\xbf(:\xb8\xb0\xd5V\x12\xbfZ\xeeF6\xdaP\xa4?\xc0^\xb2\xf4\x91\xbd\xc3?\xda\x12\xa9\xf0\xd4\x95\xb7\xbfJ\xc5J,\xa90_\xbf\xc2\x11\xe0%\xfb(\xbf\xbf\x8el\xba:\xe9\n\xa9\xbf/A\xa6\xa6\xf3#\xd2\xbfA\xdc\xb6FX"\xb9\xbfvc\x87\xc4\xe6\x95\xd7?\xe9\xc7^0\x0c=\xca\xbf\xcd3\xd0Rw+\xc3\xbf[L\xa1\xef\xe8[s\xbf\x9b\xbf\xf8Nf\xec\xc6\xbf\x990\xb6R]\xe0\xbd?\x89U~\x9eH\x0e\xbe\xbf\x93\xf1S\xd1&\xaa\x94\xbf\x0c/\x88\xddI\x82\xd5?\xec\x91\xda\xd8\x91\x81\x9e\xbf{7\xca\n8V\xc7\xbfF\x15\x10\x05\xde\xde\xc2\xbfw\x99)\xe7S\x92\xbc?\x84\x0f\xee:l\x06\xbf?X.2\xd0\xe6\xb9\xc2\xbf\xa8\x93{/\x15\x94\xbb\xbf\xd5I`\xf7\xb4\xf7\xc8?\xc0\x8cp`\xe2#u?l\xc5\x89\xdf\xf9\xf5\xc7\xbf)
\xb6\x1e`\x8b\xea\xb1?\xa73\xf2\xbe\xe6a\x9f?\t\xbb\xae\x7f\x1f*\xba?\xbc\x03bb\xd2\xed\xb3?\xc3\xb0z\xb8\xces\xa6\xbf\xc7\xe9\x85\tw\xbb\xc2\xbf{\x1d \x91\xa8t\xac\xbf\x8e\xb4\x19\xc9\xfb\xd1\xcb?\xb9\xfbV0\xd3,\x83\xbf\x03\x16U_\x1e\x15\x8d?\xcf\xc4\xf9\x8c\x08n\xa9?\x85-\xa4\xe9\xed0\xc1?\xee\xb4\xa9\xa6\xde\x91\xb4?\x8c\x99mFM\xf4\xcd\xbf\xfe#\x05\xfb\xd9\xde\x9e\xbf\x90\xdb\xc0\xc1\xa0z\xcb?)\xb9\x9fu\xf0K\xb5?E\xe1\xcf8\xd2m\xdf\xbfX\xe0\x1a\xdc\x02\xdc\xbc\xbf\xbb\xe1\xf9q\xbbk\xcc?\xbc\x07\xdf\xf5\xe8\xb8\xa0\xbf\xcdp\xcfx\xa9\xe6\xd1?\xd7E\xf3D\x08\x8c\xa1\xbfQ\x1c\xff\x02\xdf\xa4\xc3?5AE=\x97\x10\xb8\xbf`fY\xf4\xe5\xbd\xba\xbf\x93\x85s\xe0\x0fM\x99\xbf\xd4\x19\x1c\xc8\xdf\n\xd0?8Fu#\x96\xbb\xb7\xbf\\\x04\xbb\xa7\xd3\xe7\xd5\xbf\x0bL~\xefk\x0e\xaf\xbf\xc7R\xf0\xaf\xac\x01\xbe?\xa8\xe6T\x02\x00\xd6\x96?\x95\xed1\x7f\xab&\xbc?\x8d\xed_\xc7\xd0H\xb7\xbf\xb6\xe9\x96\xe6-E\x8d\xbf\x81\'\xea\x0bc\x93\xb1\xbf\xb0\xdc<\x85\n\x83\xb1?\xd9\x8c\xf1\xc4\x0bn\xc0?.\x98\xe2\x16\xc2\x0e\xb9\xbfZ\x88\x8d\xc7\xd3\xe5\xc9?\xbe\xbb#\xb7\xc96\xa2?"\xd9\x15_\x93n\xab\xbf\xeeY\xf3\x12e\x9e\xb8?%1\xb6:\xad\xc2\xd6\xbf|(\xa1\xb1\xfe\x10\xb8\xbf\xb0\xaa\xb14\xb0\xe1\xb9?iq\xfa\xc0\x9e\xa7\xc9\xbf\xff\x02\x0f"\xa4\xcb\xb0?\xea\x11\xa8\xfe\x94\x19\xd2?d\xfc\x82\x83at\xcc?4\xcdA\xcf`\xc6\xd8\xbf\x16\x90\x8e\x1d8\xdf\xbe?\xa9+Vz\xbbh\xc1?\xba\xf9{P\x14\xb6\xb4\xbf\xf4G\xd6G\xeb\xa1\x7f?\xcf\xa5 \xc1\xbbV\xb0?M\xee\xf4\xf0\xef\xae\xcd\xbf8\x92J=-\x83\xbf\xbf\xdeA\xa3\xf0\xb8\xac\x9d?\x0f\xa4y*\xcf_\x9c\xbf\x98 \x1e\x08]\xba\xc7?\xca\x04P`b\xb6\xc2?\xd0k\xa5\xb5p\x86\xa4\xbf\xf0\nt\x88\x1c\xe3\xba\xbf\xd7\xffT\x9bW\xa0\xbd?L=\xd9`\xb8\xb8\xd1\xbfbo<EK\xe9\xc0\xbf\xd9\x99\xf9\xa3\x15\xb0\xb4?\x8b\xbe\xf6W\r\xad\xbf?\xf6h\x1d$ \xc6\xb6?\xdc\x1a0\xdbC\x8c\x91\xbf4J\xf7H\x9b\x95\xba?%\xa9\x0f\x0b\xb1 \xb3\xbfe\x1c\xaf\tO\xcd\xcb\xbf\x90\x1aS\xe7\x1fi\xa3?\xee\xcf\x87P\x8b|\xbe\xbf\xab\x0eFl~\xeb\x9a\xbflL\xfd\xd8\x0c\xa5\xc8?\x8ag\xbd\xbb\xfc\xcb\xc5?B\xe0\xef\x07\xf5\x14\xb9?5\xf5\xb0\x0e]\xbd\x97\xbf\xdb\xa5t\'\xf8\xed\x80?\xfc\xb1\xb4\'t\xa2\xc5\xbf\x93\xa2{\x93\xf2n\xb3?\xbf\xc9h\x886/\xa2?\xc4\xd6\x95&\x16g\xd0?\xe9\xb5\x8f\xd8\x9c\xccW\xbf\x9dq\xe3\x03D\x86\xb9?\xd8\x0b\xcdG\x0f\xe5\xd0\xbf\xc2\n\x8dHq\x00\xcb?QTxU&&\x89?m\xa7eNt\xcd\xc6?\x15\xc4\x97\xa2\xd5\x1a\xcf?\x85\xc5\xca\x16\\\xda\xc6?\x06Pu\xa2\x83\xa8\x97\xbf\xe2\xf8\xd5\x9b}U\xbc?@?\x02\xac\x1f~\xb7\xbf\r\xdc\xa8j\x17K\xd3\xbf\x19\xceM\xe7\xc8\xdc\x94?\xce\xdeG\xbd\xcak\xd7\xbf\x13\xe5-P4\xa1\xba?\x8a/[\xba\xdf\x01\xd3\xbf\xc6\xc9\xe1\x8e_\x81\x98?\xa8\xaee\xe2}\xa1<?$j>\x10I\xce\xad?\xc2F1\xe1\xdb\x00\xcd?O\x87\x83\x8e3\x81]\xbf\xce\xe9h\xb9\x88/\xb5\xbf 
A\x9c.\x8a\xee\xbe?\xe7}<h[\x12\xb6\xbf\x9foh\xf9\xac.\xb3\xbf]\x06\x8d\xe2\x9f\x98\xa4?I$W\xae\xa6)\xb8\xbfj\t7r\xf6s\xd1?,\xb5\x86\x92ga\xa0\xbfh\x02V\xcf\xfd\x05\x8b\xbf~\x82\x1c\xc1\xcfp\x9a?iy\xd4Q\x19\x17\xd2\xbf\xd7\xa45\xbe)\x83\xce\xbf\xf3u\xfa\xd7\x92\x1b\xd5?al\xd5\xadR\xd7\xa6?`\xb6\x1f\x1a\xa7\x8b\xca?c\xffo\x98\xe0\n\xc3?qU\x84IG\xcc\xb1?\x11o\xde\x96\x7f\x94o?\xe4\xe6\xc5\xfcBk\xcf\xbf\x06\x9d\x0e\x93!\\l?7\xa4\x90\x9b(ue?(\x96\x16\xb0e\xee\xb8\xbf\x16\x1e\xe0\xee\xec\xd5\xbc?4t\xfd\x1cMf\xcc\xbf\xf2\xb25\x15\xf7\x9b\xa1?\xd8\xa1^vP^\xd3?\xb8-\xcf\x9a8\xae\xb0\xbf\xeb\xaea9\x1b\xac\x8a?\xf5U&\xca\xb9F\x95?\xcc\x1d\x89\xd1\xbe\x7f\xca?^c\xe2\xb2\x15\xc4\xd0\xbf?\xd4p\x0b6S\xbc?T_\xfbfD\x81\xcf\xbf\xb4KQ\xd9\x10U\xd3\xbf\xba\xe5qp\xf9-\xa1\xbf\x8e\xb0\x9bkr\xf1\xd1?\xfe\x98\xfe\xfa\xd5\xf7\x96\xbf\xedy\xa4%KT\xc0?\x98a\xd6\xffx\x90\xb9?\x07\x04\xc7\xba\x86\x8c\xbc\xbf\x1b\x9a\x17\x89V\xf8\xbf\xbf\xfb\xa7i\xf5\x11a\xb1?\xaa\xde\x05\x9a+\xd4\xa9?e\xdd\x90\xe6[8\x95?\xaa\x9e\x10\x87&\xab\xca?\xa5\xfa\xc1\xd4\xd4\x97\xbd\xbfV\xd0\xdbD\xd4Z\xb8\xbf\xb3H\rT]i\xd2\xbf\x03\x8a_\xcf\xda\x8c\x9b\xbf\xea*\xf6\xa1\x92\xf7\xa4?}\xf1~\x12\xe8\xa0\xc5?y\xda\x86\xea\xa5%\xc2\xbf\xdeb\xc5\xcf\x08\xff\xd0\xbf\x0b\xf8\x8e\xdb\xd5+\x92\xbf\xe7\xc4\xae\x93|\xbe\x99\xbf+\x16b\xa3J\xde\xc8?5\xfb\xe8\xa9\xafE\xcc?2J\x16!\xe4\xf7\xb6\xbf\x8b\xc8\xa3\xe6\x8by\x9d\xbf\x80\xa9)\x8dU`\xd8\xbf\xd7\xe0Nk\xf6"X\xbfx\x8c\xd3\xbdO9\xc9\xbf\x0b\xdc}\x8dY\xd8\xca\xbf:\xbbpr\xcf\xbd\xa6\xbf\x0f~\xb9\']\x07\xb5\xbf\x99T\x0f\\BW\xd0?\xaaZ\xf0rK/\xa6\xbfA\xc9\xde#\xb0\x1a\xd9?o\x8f\x0b\t\x9f\xfc\x80?\xa5\x01A\x8ceP\xbc\xbf\x90}\xc0\xad\x9f\xf4\xa2?\x11\xb8\xdb\xcc\x0c}v?B\xed\x11\xdc\xeb\x10\xb0?\xbc\xf2\x81\xf5\xb6\xb5\xbc?q\xbb1L\xe3\xa5\xb1?6\xba\x97\xb3H\xd9\xb5\xbf\xb2\x918\x07g\xec\xa2?\x94:\x99+\x0f*\x9a?\x80\xda\x81\x1cD\xa9\xab\xbf\xe6\x086[F@\xc7\xbf\\\xea\xb2\xc89\x13\xbb?\x813\x19\xdd\xfa\xe3\xd8?: \xa4p1\x89\x8f?\xb6\xe2EH\xd3\x12\xc2\xbf\xe7\xad\xefu\xb5~\xbf\xbf-\xe8\n\x92[f\xb2\xbf\xd0\x17q[u\xe9\x83?\xc1\xa5\x02\xc5=-\xc3?\x84t\x85\xc2\xb8<\x98?\xfa0\x97(@\x8d\x9d?\xb9z\xc3\x7f#\xe7I?]M\xf7T\xeb\x1c\xb9\xbf\xfb=\x19\x14\n\xd2\xb1?^\xa1i\xb4;\\\xa4\xbf\xfb|Qvp\xb1\xaf\xbf\xff\xac\xa6\x83v\xc3\xc5??\xf37\x1a\x0eG\xce\xbf\x15\xdb\xd1\x1e1\xe1\xb3\xbf\x92,\x8f\x9f6v\xba?L\xe2\xb2\xce\x7f8\xb9?\xef\x03\xb9nQ\xd4\xa9\xbf7}_\xf0\x1c\xd8\xb7\xbf+\x91\xcc`\xfe\xcd\xa7\xbf(\x7fqs\x9cs\xcf\xbf\xd6\x97V\x03\x14\xc5\xb3\xbf\xbcG"\x06\xf0I\xd5?G\xa7\xccr\xe5\r\xba\xbf\xd4\xc1\xd5\x81\xbb\xc3\xc6\xbf\x88n\xa4\x96\'\xad\x9f\xbf\x87}sKO\xc9\xba?r\xe3\xf0\xa2\xbaw\xc4? 
\xf4\x97\xd3M\x18\xdb\xbf\x05\xe1\\\xbe\x03\xaf\xc3?R\xcd\x9c\xbdW\x8c\xd5?\xa5\xb3\xa1#\x1by\xba?\x14\xcb\xb1f\xf8\x9b\xb8?R\xf6\x08\xa3\xe4\xf8\x93\xbf\x941\xba\xfd\x8a}\xb4?\xe7\xf4A\x82\xfe:\xc0\xbf\xae\xad]\xe5\'\x8c\xad?\xa6\x11\x19\xa3\x89\x16\xda?\x8b?X<\x02\xeb\xcc\xbf]4\xcd\xc1\x0f\x0c\xad?Q\x04C\x8bj%\xc8?\xec\xff\xd4\xfd\xd9\x1d\xd6\xbfdE\x00\t\x8c\xf3\xbd\xbf|\xc6\xa7\x04\xd0x\xc4?\x8a\xe9\xe4\xd9\x1d\xd2\xc4\xbf\xf7\xc2`\x93\x05F\x8c\xbf\x9f\xdb\th\xdf\x14\xab?\xd8\x81\xa5\x97\xe1\xb9\xbf\xbf\xe3\x91\x9f\x10W\xb9\xcb\xbf\xba\xac&\xb7\x89\xf4\xd0?\xe1Q\x1f\x8c\x05Y\xc6?\x11\xa2%uIL\x90\xbf~p\x92\x930\xf1e\xbf\x00>\xea\xa49y\xca\xbf\x99{`%E_{\xbf/\xbfu\xa1\x15\xa3\xc4?\x83\x8d\xb9\xcf\x9b9\xc0\xbf\\\xaa\x19\x17\xc2\xd3\x80?\xc6\x18\xc73\x90\xed\xd0\xbfo\xe1\x99Z\x81\xacz\xbf\x01\x1b\xee\x13\xa7\x10\xd1?\x06\x87\xba\x1c\xa1-\xb8\xbf\xf6\xd0\x08\xf5|u|\xbfO\xfd\t{\xa71\xb4\xbf\n\x1bnh\xd9\x1f\xcd?\x7f\xf6\x8f\xa1\xeaZd?\xbc\xdf\x17\xa6R\x9b\x9e?o\xd9\xda\xd0\xda{\xc3?\xaa\x16\xdeY.L\xaa\xbf_\xdd\xd4\xc5o\x8c\x88?\x1fV\xc6GI\xcd|?\'m\xa4\x82n\xb9\xc8\xbf\x04\xe3\xd6au\x91\xca?l\x0f\x13\xe0u\x99\xb0\xbf\xe9$\x1a\x98~Y\xc0\xbf\x9d\xfds\x8aa\xff\x9f\xbf\xa8\xd0v}\xac\x99\xab?\xd7B\x90yoQ\xc2?\x85{\x022{n\xcd?]\xb3]2\x04\x06\xb5?md\x03/\xd0\xdf\xd0\xbfa!\xc2\xeebH\xa9\xbf\xdd\xcerR&\xcd\xbf?\xd1 {\xa2\xcf\x1f\x94?\xbb3\x13\xa3:\xab\xc4\xbf3n\xcf\xb49\xdb\xa6\xbfyB&\xbbo6\xa7\xbfT\'%,!\x86\xa3\xbf!\\\xd6H&\x96\xb0?[\x97\x05\x10v\x95\x9d?\x85\x05\x927W\xa4\xac\xbf\x1e\x14\x0bJ\x90\xc0\xb9?\x91\xdeZ\xff\x15\x83\x97?H\xb5\x1c\xf5cB\xab?\xd9\x81c\xe6\xb2\x19r\xbfe\x8e\xc6{@G\xa3?\xd3\x04L\x9f\xc7\x95\xd1\xbf\xae\xe0\xf2\x8b\xed\x9f\xb3?\xda%X2:V\xda?\xe5\xc9S\xf1\x15\xa7\x90?\x8f\xc1\x96\xa4?\xd3\xb9\xbf\t9J6-\x15\x82?e@F\x11\x88\xe7\xd3\xbf\xcb\xdf\n<\x8d\x81\xb4?\xee\xe1REq\xe1\xc9?\xd2\x97\xf2\x15\xa2,\xab?\xc1\x1a7+;\xf0\xd3?\xd0\xa1\xea\xba\xb2\xf3\xac\xbf\xf2\xb6w\x87\xed\xde\xd4\xbf\x12\xbe\xd5\xa3\xfd\xe5\xc2\xbf\xec\x05\xf1\xb8\xc9\xdc\xa5?\xeb\n\xc1\x0e\xf3"\xd1\xbf\xfe\x99\x02\x9c\x17|\xb7\xbf\xd0\xcbM\xc4\xcc\xd0\x94\xbfm\x86\xeb?\x16\xe8\xa4\xbf\x914\x8ck\xf6\xc8\xb7\xbf&\xf3\x8f\xe2\xd6\xa0\x8b\xbf\xa0\xed\xf7z\x91O\xa8?\xbca\x985\x98I\xab\xbfTBt\xfe[\x90\xc2?\x90\x95\x9f\x0c\xb2\xd8\xa6?\xe8U\xd6\x91\x90\x05\xb0?\xe0w8\x10`\xfc\xb3?\xeens\x85\xf8i\xc6\xbf\x13Aa=\xbeo\xbb?\x0be\x1a\x81n\xe7\x8e\xbfV\x8b\xf8+\xcd\x8a\xc2?1^\xea\xb1\xb4:\xd1?\x07\xe6@\xfd\xb3@\xe2\xbf|\x88\x8b\x89\xa0\xf4\xc9\xbfV\xec\x89@\xa1\t\xc0?\x95oUj\t>\xae\xbf{\x99\xe6\xfd\xb8k\xdb?\xb0\xc2\xd4K\xed\xd5\xa5?*3\x9d\r\xbc\x04\xc0\xbf3\xdca\xdc\xb5\xbb\xc2?\xd6\n\xd1e\xb7&\xc2?\xe1J\xb0\x842\x86f\xbf/JAC\xa8%\xb0?B\xaaL\xe1J\xf3\x80\xbfc\xd8"\xd9A\x91\xa8?\xe0\xb4\xa1\xec\x83\x1b\xb0?Ki\x00\x85yv\xbe\xbf\xd2\x01\x19\xdf\x8c\x9e\xb6?\xc9\xfe\x03\xffq\xea\xc5\xbf\x99\xb9\x06\x97_\xc2\x92\xbf\x17\x19\xeb\xca\xc1\xe3\xc8?W\x16\xba\xf3g\xf6\xb0\xbf<_\x86~R\xe5\x9e?\xfb&i\xa1\xf3y\xad\xbf\x93r\n\x8d\xa4D\xcc\xbf\x00\x08\xf11:\xb4\xc2\xbf\xe3\x83\xddO\\\xb8\xb3\xbf\xedp\xcfy\x1a\xb7\xad?x\xb0\xb3\xfb\xfd\xbf\x9b\xbf 
\xae:\xba<\xe4\x9c\xbf\xe7u\x00\rPz\x93?\x14*b\x8e6`\xd1?\xc6\xfa$\x9d)\xb5\xa6\xbfO\x0e\xff\xb7J\x94\xd4?E\xf2\x1f\x12\xbe\x80\x89?lK\xb3\xc6\xa2\x12\xdb?j*\xc5\xdf\xdd~\x8c?\x9b\xf5L\xf9\xdep\xb1\xbf\xce\xe7P(\x8b\xb8\xaa?G\x82C\x9fqE\xaf?\x0e\xa8\xcd\xec\xf4U\xf9>\x9e\x87\x94p\xb5l\xa7?\xd9\x06\xd5\xa0\x07]v?\xfb;_G\xd1f\xce?;\xe0\xf5\xa7\xdb\x08\xb1?\xc8\xc2\xd9\x92C\x08\xbb?\x04eQ\xf6\xa7\xb7\xb5\xbf\xac\xf1_f%\xc4\xb2?\xef\xb40\xc5;\x1br?\x85\xd1\xe3\x1aH\x87\x83?\xb1\x127\xb5\xad,\xb4?Ka\x0c\x1a\xa0\xe9\xa5\xbf\xe9\xde\x8f9\xc0:\xbe?\xf2t+\x90O\xfd\xd3\xbfU\xb9"\xac\xa4Y\xa2\xbf\'\x98\xe3r\x8a\x11\xd6\xbf\x83\xf8{}\xb8r\xad\xbf\xe5"\xe2\xe5xS\xde\xbf\xbe\xe1\xa9X\xe7d\xa7\xbf1G\xe5\x1a\xf1\xe9\xa5\xbf\x12\x18\x02\xc18I\xba\xbf\xd1g\n\xa605\xc6\xbf\xab\xb1\xc2 \xd3\xf1\x97?\xec\x834\xa0\\\x90\xbd\xbf|\xd8\x92\x8b\xce\xab\xb4?\xff\xa6~!SFt\xbf6w\xd3\xc6\xa2c\x94?I\xbdv\x19F\x03\x9b\xbf.\x16\xe3\xa0\x1f\xb9\xc2?\xdd\x8d\n\xee\x00C\xaa?y\x81z\xc6v\x85\xe4\xbf&\xb15\xe3\xa2\xb4\x8f\xbfH\x1f\x85?\xde\xbc\xa7?\xd1\xd8\x9c\xfdu\n\xae\xbf\xc6\xaf\x1a{\x17\xb6\xe3?\'r\xbd0\x90\x90\x92\xbf\xcemn\x87\x80\xd8\xc3\xbf\xd7{\\\x96\x99\x80\xaa?\x1f\xf1\xfd\xf0\xa0\x0f\x86?\x92C:#\xa5\xb5\x91\xbf\xc0(\xdb\xafh\xc7\x7f?\xef_\xbe$q@\x98\xbf\xda!b|\xe3;u?\x9dE\t\x86\x8c\xb6\xa0?\x90G\xda\x82D0a\xbf\xed)\xb7A[\xc1\xb5?*H`+[\xeef\xbf\xefg\xed/$\x11\xa1\xbf\x92\xdcK\xfa\x9b\xb0v\xbf\xd6\x97!-\x19\x96\x8b\xbf\\\xc5\xa2b\x94\n\x9f\xbf\x1f\x95)\xc4:\xdc\x92?\xe3\x0c\x13\x8b\x05\x93\xd2?s\xdd\x8d\xe9\xb3\xdc\x97?\xd5\x1d:W\x93\xac\xbb\xbf9h\x9c\xa9\xd8\xfe5\xbfd\x08\xd6\xbd\xb5\xdd\xc7\xbf\xfc^\x13&\x12\x90\xa1?\xbb\x86\x85\xb7w\xd9\x8a\xbfU\x17\xbbP\xc4\x01y?N\xdec\xb2\xab\xacr\xbfg\x99\xf7.\r\xf1\xa4\xbfp\x1c{\x00\x1d\x90\x9b\xbf\xb7\x1b$\xe1i\xfa\xac\xbfBb\x92\xc8\x91I\xa1?\x7f\x83\t\xe8\x94"X?\xe8\xe7c\x0f\xf0\xa0\x91?;{A\x9eK\x81\x96\xbf\xaa\xcf\x19\xf2\xd8vs\xbf\xf3\xa8r\xc75bp\xbfNf$T::\x84\xbf\xd1\xb7 ?\xd4\x86\xc7\xbf\xbc*\xbcz\x98L\xad\xbfV\x05q\xd5G\xf3\xb3\xbf\x85:_G\x12E\xa8?PZ\x12N\xa6\xaa\xcf?\'\xe3\xfbk\xc9\xa5v\xbf\x84h%\x99!8z\xbf\x01p"\xe3\xe0\xf5\xa1\xbf;u\x8f\x1d\xea\xa7{?\x84\xefD\x19 \x8c\x96\xbf\x1aW[+\x15=\x93\xbfyi9C-\xa7\xb8?.\rO\xdck\xcf\xc6?\x9b\x8aK\xd0\xd0\xdf\xd7\xbf\xdd\xb2{J\xc8\xf6\x89?\x95\xd6\x9a\x18\xff\x8c\xd4?\xa1B\xb1\xe4n\xd8\xcb\xbfG OuRF\xab\xbf:\x88N\x1b\xcc\x89\xa4?\xddn\xecv^\xfc\x80\xbf0rH\x1c\xbf\xd9\x8e?z\xa6\x07\xad\x08\x80\xcb\xbf\x13VJ>\xd7\xad\xbc?\x9f}\x9b\xb8<\xb5\xdc?\xb2.\xc9\xe5\xed\xd3\xb4?\x07J\x17\xfaUw\xcc\xbf\xc3\x9b\xb8\xcf\x97\xb4\xc0\xbfW?\x96\xd1\x81\xe5\xa7?\x93\xca\xf2}\xf8\x98]\xbf\x91\x82F\xf9!\t\xc7?\xd2\'\x98u?5\xcf\xbft\xe5\xb0_c\xed\xcd\xbf\xa4\xf0^\xe55\x04\xa5\xbf,\xef\x05\xf3\xeb8\xb5?\xae\xd9\xe7\x83\xf6A\xd0?\x8c\x8c\xb8\x9c\xb4\x89\x8f\xbfk\xb0e\xa2\xee%w\xbfbIb\xc5\xc8b|\xbf\xb6G\xd7\xd8\xb4\x0b\xa0\xbfh\t\xa7\xec\x85h\\\xbft\xd3y\x81\xd2\x03\xc8??h\x92O2\x0f\x91\xbf1w\x0e\n\xc9\x9b\xc2?yz\x98 \xc4\x81\x9d?70&\x0f\xae\xf3\xc9\xbf\'\xd9k\xd0{\x1bt\xbf\x10\x0e&\xaa<t\x8e?9\xc5\x17\x8f\xa0\xa6{?/\xe5\xb0\xdf)~\x84?\x88!{?_\xb4\x98\xbf\xf9\xc3?\x9d\xf0c\xa3\xbf\xd1\x8d\xaa\xe2\xca\xa4\xd0?\x8d\x9e\x99]\xf8\x19\xbe?<y\xe0Z\x04M\xe3\xbf\xbef\x14\xcf1#\xc4\xbf\x93y\x9f{\x12?\xdb?\x0b\xe8\x15\x7f\xe0\xfc\xb6?\xb4Mzl]\x7f\xb0\xbf\xf1\xc8\xb8\xb5T\xf4\xae?g\xa2 
yqm\x9b?c\xc2\x93:\x07\xe8f\xbf\xbd8\xab\xfa]\xab\xb3?hc\xa9\xb42\xca\xc2\xbf\xc7\xdeBxp\x1b\xc4\xbf\xacY/\xb2\xdeEt?b\xd8\xb9\xff\x95\xfa\xb4?Y{\xb3z\x11j_\xbf\xb4\xbb\xf0y\xd3\xe91?\x01L\xda)4\xdce\xbf\x11\x8fR\xbbD8\xc1\xbf\x99\xb6\xe9\x9cC\xd7\xc0?)\x9bx\x98iF\xd2?~\xcd\x0b\x11\x06\xd6\x80\xbf\xfe4\x9b\x8aR\xf6\xc4\xbf\xf5\x19RKE\x1b\xc5\xbf\xb0\x98\xde\x93\xcd\x9a\x84\xbf\n\x1f\xa0\x0f}c\x86?\xb7\r\x97\xbek\x98K\xbf\xe4T\x85\x84b\xf1\xa4\xbf\xd4a\x17)`\t\xaa?\x9d\xe3\x0e\xd9\xa6K\xd5?\xf4\x82\xc0Y\xa6\x10\x9c\xbfj\x92W\x96\x10\x15\xd9\xbf\xbf\xce\x80\r\xe8h\x94\xbf\x05>\xb8\xea\xb1;\xc5?\x88\x01&\xe4\x0bDh?pBM\x8f\xd2U\x99\xbf\'\xb0)\xd9\xdf ~?\x0b3\xb2\x92@I\x88?\xb4\xbb\xac\xf1T\xc5\x87\xbfi\xbb8\xd9\xba?\x95?\xf5\xe0\x8c\xf08\xb2\xaa?\x94%e\xc8\x83D\xd5\xbfR\xa2\x94\xa1\xb3\x97\xc0\xbf\x85T\xa0\x9e\xf2\x82\xe4?\x8e\x86CW/\x81\xb5?\xc2H\xd6O\x87\x9a\xd3\xbfR\xcbV_^%Q\xbf\x965\xcf\xb4\xc6\xff\xa5?Tj\xd8\xe6\x97\x00\x91?\xc7\xd0\x96\xc54-\x80\xbf\x19s\xb3\x7f\x92\x07\x9e\xbf\x16\xc8\x17\xcf\xf1\x8f\xa1\xbf\xa6C\xc2{\x9c\xc6\xa3\xbf\xbes?\x005\xa3\xc2\xbf>\x86\x8c\xb0\x16_\xaf?\xd0)7@\xa8\xb2\xbc?\xa0\x87\xf8\xdf\xfa.\x81\xbf\x05+\xa4)wc\x82\xbf(`\xbe)\xd7\xe0\xa7\xbf\xc1\x98P\x7f\x04\x94\x99?\xb5\xa6\x1d\x14\tY\xb7?\x1d)\x93\xa9lb\x91\xbf\xd0U#\xb3\xa2\xf5\xa5\xbfr>\x02F\xfeE\xab\xbf(\xb5\xb0\x80z6}?\x94f\x01\xd6\x86\x83v\xbf\x0e\x81\x9d\xdd\xed\x15B\xbf\xccl\xc8\xdc\x03\x10\x8e\xbf0\xba\xc1\\R\x1a\x99?\xf7^Ni<\xfc\xc9?%}\xfd|\x04L\x82\xbf\x1c|a\xc7ty\xb9\xbf\x12\x93 \xa0\xc9,\x85\xbfR$\xde J\x00\xbb\xbf\x81\x86\x7f\x16\xd6\xc1\x81?V\xd2\xc3\xd4\x92?a\xbf\tr\x03\xb0\xf7@\x81?|+\x99\xaa\xf6\xd1A?\xc4PN\x90\x9a\xc3b?\x85\xfd\x1b\xce\x08\x0fq?0G1\x10\xa1\xcb\x91\xbf\x9d#(\x11\x17\xe9\x91?\x8e\xa7x\x04\xbe?\xb0?.\xda\xac\x0f\xcd*\x96\xbf#\xb4\xef\x0f\xfe\xa0\xa7\xbf\xda\x08t\xb71\x1f\x86\xbf\xee\xf6\xa4\t\xa6\x83g?\xc0\xd8?CX\x07\x83?\xa2\x9e\x008&0\x81\xbf\x81\x8b\xff2\x04*~?\x13G\xee\xd1\xc6$\x82?\x9a(15\xf2\xc8\xe2\xbf\x7f\xe2\x14\xa7\xf4q\x84\xbf\x9e\xc4%\x98\xcf\xfa\xdf?7e\xb0!\x03\x1b\x94\xbfD\x89\xf6\xe7xx\xb7?\n\x01l)0\xe7\x89\xbfZ\xc7\x15\xb0Xho\xbf\xfe-\xa7\xa25\x17\x96?bm\x1b\x91\xceMr?B\xd9\xees\xa6k\x9b\xbf,0T3iY\xd9\xbfj\xcd\xfblc\x96\x8a?\xc4\x93/$e,\xda?\x99\r\xcb\x83\xe2\xbef?\xa1?\x18s\x9c\x90S\xbfm\xa7\x0f\xd0\xdb\xcfh?{\xd5\xa0\t\xd9Z\x86?\xde\xdd\x95\x94(T\x93?\xc8 W\x97\x85~\xd0\xbf\x1f+\xe0\xaf\xfcb\x92\xbf\xfc\xc7C\xe2\xb0\x15\xe1?\xd4\xca\xfbPIbc\xbf\x0b\x02\x88\xaf\x0e\xa7\xce\xbf\xcb\xd6S\xf0\x0c\xf1j\xbf\x97Rz\xc0{fU\xbfc\xd01\x05:\x1dE?\x81<U\x9d\x18\x9ex?_[x(\x9f\nx\xbf]\xf6\x9b.\xaf\x1c\x81?\xe0\xbe=#\xc3[\x9c?1\xe8\xe0uY\x1b\xc1\xbfm\xa7\x98B\xad\n\x9f\xbfvl\xb4lR\x85\xd1?\x1e\xec\x84\x87\xc5\xd5}?\xf1\xeaI\xae\xec\x93\xc5\xbf^\x14W&\x8e1s\xbf\xb9Lr\xf5H2X\xbf\xfa\x10\xbb1\xa3nf\xbf\x07\xe4\x85\x1f\xa2#J\xbf\xb9\xc54v\xb0\xaa\x9f\xbf\x0c\xdb\xd9\x8a\xfc\xde\xbd?\xea\x04\x13\xdb)\xf0\x9b?ND^\xf0\xa2\x85\xd3\xbf*\xfb\x1bsY#y?\x92>\x8a\x89\xcb\xf0\xc0?\x12c(B\x90\xf4=\xbf\x9a\x1chDc9\x80\xbf\x81.\xb5m\xb6qz\xbf\x1f\xbb\xc2\t\xc2\x05\xcc?\xe1\x0cQ7Co\x90? 
v\x134\xd3\xa8\xdd\xbf\x1aW0q\xf5Qs\xbf,#.\xb0=\xe6\xd1?}\xd6\x83H\xb0v\x7f?\xfd\x91\x9e\x93u\xedz\xbf\xce\xb3wL\x9f\xaa\x85?\x83\xeaV\t3C\x80\xbf\xb7"\xe7\xd4\xd9 \x91?YHv\x08\x10\xfb\xb3?\xfd=\x82\xed\xea\xbeu\xbf\xa9?C\xd0\xd1~\xbb?t\xea\xd3\x1f\xbc\xc5\x8c\xbf\xd9\x82\xa1\xc0M`\xc1\xbfqlX\x00\x94\xe8h?Yqz\xc7\xf9\x9fP?\x08\xa2\xed\xd3\x7fhL\xbf3\x9e\xf5\x0fb\xd5l\xbf\x8ao\x16\xd0\xfa|g\xbfia\rJ\x9c\xa0`\xbf\'\x95\x04\x01\xc6np\xbf\xfb\xf9CN/\xf9\xbe?\xb9[\xd7\x0e\x87mv?\xe6[\xb2\x97\xe6\x1f\xab\xbf\x16i\xd9zQ@}\xbfC\xf5\x8e\xb4\x0b\n\xac\xbf3M\x1c\xe9cI\x80?2\x18\x074tpw?\xa2\x1d\xdcUY\xd2\x95\xbf\xee\xc4\xd8M\xed^\x84?\x8c\x03\x02\x0e\x8b\xc8[?5\x07Z\x12\xea\xda\xcd\xbf\xffS\xb76\xe8\xec\x97\xbf\xee\xb6PPi}\xd6\xbf\xec2\xae\xba\xed\x89t?\x8c\x8e;"\xdcA\xe2?\xc1\xbf\xbb]["s?\x9e\xe4^.\x0e\x94d?\xf6\xb8\xb9rfwW\xbf\xc3\xf7\xcem\x91\xaf\xe1\xbf\xa9\x81\xf4\xab\x13\xe1\x93?\xb7i\xed:\xfdB\xd3?E\xf6\xec\x1d\x07\x1b_\xbfP\x96u_M\x14\xca?'
|
434 |
+
p179
|
435 |
+
tp180
|
436 |
+
bsg79
|
437 |
+
g64
|
438 |
+
(g65
|
439 |
+
(I0
|
440 |
+
tp181
|
441 |
+
g67
|
442 |
+
tp182
|
443 |
+
Rp183
|
444 |
+
(I1
|
445 |
+
(I40
|
446 |
+
tp184
|
447 |
+
g74
|
448 |
+
I00
|
449 |
+
S'+\xc4\xbcx\x90\x9b\x19\xc0\xf7\xc2:\x7f<\x0cV\xc0<%\x19\x9a\x87\xee4\xc0\x0c\xc6r\xc3^\x86K\xc0R`B\xfat?<\xc0nI\xd2\x0f\x93\xbc6\xc0\xe3\x99\xe1U@\x916\xc0\x99\'U\x8e\x02\xf9\xf8\xbf\x08!\x9c\xee\x16\x9f<\xc0Au\x8a}\xc3l6@wN\xe1D)\x845\xc0E\xa7\xb6\x90;\xc7K@R!\xe1K\x10\xf4"\xc0\xc5\xfa\xdd\x11D\xf3U@\x81\x8c\xc0\x9c\x9fN5@\x90w\t\x03\xf1\x8aL@p\x9f\xdc\x1e\x19\x87A@g\xf04\xe9h\x989@\x1b\x9a~\xd2}\nC@B\xebG\x95\x95\x95\xb4\xbfp\xa4q\x1c\xfb\x04B@a]\xfbU6\x977\xc0\xb5\xa4|\xd9\xf5n7@\x86=\xd3)\x16\xc5K\xc0\xb8z\x156\xb2\xf8\x14\xc0n\x16\xaf\xf0-\x9aR\xc0\x8e\xda=c\x88\xa8 \xc0\x07\xa2\x8d\xd5\xcc\xc66\xc0\x17PH\xd2\xae\x11\x19\xc0\xccP\x89\x14\x0b\xca\xf2\xbf@\xfd}\xef\x881"\xc0h\xeb\xb8\xfd7\x0f7@\xcf\x1f<\x11;u\x1e\xc0\x05\xc1"\xb8\xb5\x81R@\x0fV\xe42E\x7f\x15@}\x8d\x1f+\x13\x987@\xd6 yRQK!@(\x17W\x12\xec\x89\xef\xbf\x02\x8fH\'\x96\x80\x19@%n69x\x0c7\xc0'
|
450 |
+
p185
|
451 |
+
tp186
|
452 |
+
bsS'n_samples'
|
453 |
+
p187
|
454 |
+
I3148
|
455 |
+
sS'_n_active_components'
|
456 |
+
p188
|
457 |
+
I36
|
458 |
+
sbsbasS'reference_shape'
|
459 |
+
p189
|
460 |
+
g0
|
461 |
+
(g87
|
462 |
+
g2
|
463 |
+
Ntp190
|
464 |
+
Rp191
|
465 |
+
(dp192
|
466 |
+
g91
|
467 |
+
g64
|
468 |
+
(g65
|
469 |
+
(I0
|
470 |
+
tp193
|
471 |
+
g67
|
472 |
+
tp194
|
473 |
+
Rp195
|
474 |
+
(I1
|
475 |
+
(I20
|
476 |
+
I2
|
477 |
+
tp196
|
478 |
+
g74
|
479 |
+
I00
|
480 |
+
S'\x1f\xd0\xa4\xc6\x16\x9dx@\xff\xe4\xc0F\x8f\xbek@\x92\x83\x8a\xda\xe1\xaew@\x9dy\rz\x0b\x0ep@\x1aM\x8e\xbd\x81:w@D\x89\xe3\x88*5r@\x89\x1dp1\x10\x98w@\xaa\xb7\x08\xaf\xdf\x9es@!\x96&j\x9c3w@\xd2\x9f\x96\x8af9u@xn\xba\xf6d\xa4w@\xbd`\xfcA\x0flw@9a\xf83\xc9ix@\xe9\xd4\x98\x81\x9e\x8ay@\xc9\xa0\x97lP\x88z@\xb9\x0f\xdbEQ\x85w@5\xa4u\xbd\xd0z{@n\x1b-R"nu@\x11\xbcJ%\xb2\xae{@\xf3\xd6K/\xe8\xb6s@\x18y0\xb1)\x8b{@\xe1\xfa\xfc\x8a\x1b&r@\x92\x05\x957&\xacz@=\xd8\xb9\x934\x05p@\xb6\xd5\xdc\xfdZ\xb4x@\x13\xe0D\x0f6\x87m@O\x9fW5\x82\x82x@\xbbX\x7f\xf2W4r@I\x9f\xccL\x0b\xa5x@\x92/\nC\x19\xa5s@\x94\xd8\xe66\xeatx@~\x1b\x14A\xb5Cu@#jk\x91\xa0\x8cx@B]\x13\x1cl\xa6x@x\x8ec\xceW\x8dy@\x11\x1e\xbe\xd3{Mu@K\xeb\xad\x00\x99\xc5y@\x11\x82\xb4GP\xa8s@\xfe\xda\xd7x\xb6\x9ey@5*0}\x03/r@'
|
481 |
+
p197
|
482 |
+
tp198
|
483 |
+
bsg98
|
484 |
+
Nsbsg48
|
485 |
+
(lp199
|
486 |
+
g49
|
487 |
+
asg51
|
488 |
+
g34
|
489 |
+
sg46
|
490 |
Nsb.
|
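For readers who want to verify the fields listed above, here is a minimal sketch of how such a pickle could be inspected. It assumes menpo and menpofit are importable (the pickle references their classes, so unpickling fails without them), and the file path is a hypothetical placeholder, since the actual filename is not shown in this part of the diff. Because the file was written by Python 2 with pickle protocol 0, loading it under Python 3 needs `encoding='latin1'` so the raw numpy byte strings decode correctly.

```python
# Minimal sketch for inspecting a pickled menpofit CLM model.
# Assumptions: menpo/menpofit are installed; 'path/to/clm_model' is a
# hypothetical placeholder for the actual pickle file in the repo.
import pickle

with open('path/to/clm_model', 'rb') as f:
    # Python 2 protocol-0 pickle: fix_imports (True by default) remaps
    # copy_reg/__builtin__; encoding='latin1' decodes the raw byte strings.
    clm = pickle.load(f, encoding='latin1')

print(clm.opt['numIter'])                    # 5 in the dump above
print(clm.shape_models[0].model.n_samples)   # 3148
print(clm.reference_shape.points.shape)      # (20, 2)
```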