younesbelkada committed
Commit
4d6b877
1 Parent(s): 57bb0db

commit files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitignore +1 -0
  2. app.py +103 -0
  3. boundaries/stylegan2_ffhq/boundary_Bald.npy +0 -0
  4. boundaries/stylegan2_ffhq/boundary_Eyeglasses.npy +0 -0
  5. boundaries/stylegan2_ffhq/boundary_Hat.npy +0 -0
  6. boundaries/stylegan2_ffhq/boundary_Mustache.npy +0 -0
  7. boundaries/stylegan2_ffhq/boundary_Smiling.npy +0 -0
  8. boundaries/stylegan2_ffhq/boundary_Young.npy +0 -0
  9. boundaries/stylegan_ffhq/boundary_Bald.npy +0 -0
  10. boundaries/stylegan_ffhq/boundary_Eyeglasses.npy +0 -0
  11. boundaries/stylegan_ffhq/boundary_Hat.npy +0 -0
  12. boundaries/stylegan_ffhq/boundary_Mustache.npy +0 -0
  13. boundaries/stylegan_ffhq/boundary_Smiling.npy +0 -0
  14. boundaries/stylegan_ffhq/boundary_Young.npy +0 -0
  15. dnnlib/__init__.py +9 -0
  16. dnnlib/__pycache__/__init__.cpython-38.pyc +0 -0
  17. dnnlib/__pycache__/util.cpython-38.pyc +0 -0
  18. dnnlib/util.py +491 -0
  19. models/__init__.py +0 -0
  20. models/__pycache__/__init__.cpython-38.pyc +0 -0
  21. models/__pycache__/base_generator.cpython-38.pyc +0 -0
  22. models/__pycache__/model_settings.cpython-38.pyc +0 -0
  23. models/__pycache__/pggan_generator.cpython-38.pyc +0 -0
  24. models/__pycache__/pggan_generator_model.cpython-38.pyc +0 -0
  25. models/__pycache__/stylegan2_generator.cpython-38.pyc +0 -0
  26. models/__pycache__/stylegan3_generator.cpython-38.pyc +0 -0
  27. models/__pycache__/stylegan3_official_network.cpython-38.pyc +0 -0
  28. models/__pycache__/stylegan_generator.cpython-38.pyc +0 -0
  29. models/__pycache__/stylegan_generator_model.cpython-38.pyc +0 -0
  30. models/base_generator.py +248 -0
  31. models/model_settings.py +102 -0
  32. models/pggan_generator.py +133 -0
  33. models/pggan_generator_model.py +322 -0
  34. models/pggan_tf_official/LICENSE.txt +410 -0
  35. models/pggan_tf_official/README.md +174 -0
  36. models/pggan_tf_official/config.py +140 -0
  37. models/pggan_tf_official/dataset.py +241 -0
  38. models/pggan_tf_official/dataset_tool.py +740 -0
  39. models/pggan_tf_official/legacy.py +117 -0
  40. models/pggan_tf_official/loss.py +82 -0
  41. models/pggan_tf_official/metrics/__init__.py +1 -0
  42. models/pggan_tf_official/metrics/frechet_inception_distance.py +281 -0
  43. models/pggan_tf_official/metrics/inception_score.py +147 -0
  44. models/pggan_tf_official/metrics/ms_ssim.py +200 -0
  45. models/pggan_tf_official/metrics/sliced_wasserstein.py +135 -0
  46. models/pggan_tf_official/misc.py +344 -0
  47. models/pggan_tf_official/networks.py +315 -0
  48. models/pggan_tf_official/requirements-pip.txt +10 -0
  49. models/pggan_tf_official/tfutil.py +749 -0
  50. models/pggan_tf_official/train.py +288 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ *.pkl
app.py ADDED
@@ -0,0 +1,103 @@
+ import os
+
+ import PIL.Image
+ import numpy as np
+ import gradio as gr
+
+ from models.stylegan_generator import StyleGANGenerator
+ from models.stylegan2_generator import StyleGAN2Generator
+
+ VALID_CHOICES = [
+     "Bald",
+     "Young",
+     "Mustache",
+     "Eyeglasses",
+     "Hat",
+     "Smiling"
+ ]
+ ENABLE_GPU = False
+ MODEL_NAMES = [
+     'stylegan_ffhq',
+     'stylegan2_ffhq'
+ ]
+ NB_IMG = 4
+ OUTPUT_LIST = (
+     [gr.outputs.Image(type="pil", label="Generated Image") for _ in range(NB_IMG)]
+     + [gr.outputs.Image(type="pil", label="Modified Image") for _ in range(NB_IMG)]
+ )
+
+ def tensor_to_pil(input_object):
+     """Converts a batch of images (or a dict with an 'image' entry) to a list of PIL images."""
+     im_array = []
+     if isinstance(input_object, dict):
+         images = input_object['image']
+     else:
+         images = input_object
+     for image in images:
+         im_array.append(PIL.Image.fromarray(image))
+     return im_array
+
+ def get_generator(model_name):
+     if model_name == 'stylegan_ffhq':
+         generator = StyleGANGenerator(model_name)
+     elif model_name == 'stylegan2_ffhq':
+         generator = StyleGAN2Generator(model_name)
+     else:
+         raise ValueError('Model name not recognized')
+     if ENABLE_GPU:
+         generator.model.cuda()
+     return generator
+
+
+ def inference(seed, choice, model_name, coef, nb_images=NB_IMG):
+     np.random.seed(seed)
+
+     # Load the semantic boundary (a latent-space direction) for the chosen attribute.
+     boundary = np.squeeze(np.load(os.path.join('boundaries', model_name, 'boundary_%s.npy' % choice)))
+     generator = get_generator(model_name)
+     latent_codes = generator.easy_sample(nb_images)
+     generated_images = generator.easy_synthesize(latent_codes)
+     generated_images = tensor_to_pil(generated_images)
+
+     # Shift every latent code along the boundary direction, scaled by `coef`.
+     new_latent_codes = latent_codes.copy()
+     for i, _ in enumerate(generated_images):
+         new_latent_codes[i, :] += boundary * coef
+
+     modified_generated_images = generator.easy_synthesize(new_latent_codes)
+     modified_generated_images = tensor_to_pil(modified_generated_images)
+
+     return generated_images + modified_generated_images
+
+
+ iface = gr.Interface(
+     fn=inference,
+     inputs=[
+         gr.inputs.Slider(
+             minimum=0,
+             maximum=1000,
+             step=1,
+             default=264,
+         ),
+         gr.inputs.Dropdown(
+             choices=VALID_CHOICES,
+             type="value",
+         ),
+         gr.inputs.Dropdown(
+             choices=MODEL_NAMES,
+             type="value",
+         ),
+         gr.inputs.Slider(
+             minimum=-3,
+             maximum=3,
+             step=0.1,
+             default=0,
+         ),
+     ],
+     outputs=OUTPUT_LIST,
+     layout="horizontal",
+     theme="peach"
+ )
+ iface.launch()
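The attribute edit inside `inference()` is a single linear move in latent space: each sampled code is shifted along a pre-computed boundary direction, scaled by `coef`. A minimal standalone sketch of that step (assuming z-space codes of dimension 512 and one of the boundary files committed above):

import numpy as np

rng = np.random.RandomState(264)
latent_codes = rng.randn(4, 512).astype(np.float32)  # batch of z-space codes
# Each boundary file stores a (1, 512) direction vector (a hyperplane normal).
boundary = np.squeeze(np.load('boundaries/stylegan_ffhq/boundary_Smiling.npy'))
coef = 1.5  # positive pushes toward the attribute, negative away from it
edited_codes = latent_codes + coef * boundary  # broadcasts over the batch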
boundaries/stylegan2_ffhq/boundary_Bald.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan2_ffhq/boundary_Eyeglasses.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan2_ffhq/boundary_Hat.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan2_ffhq/boundary_Mustache.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan2_ffhq/boundary_Smiling.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan2_ffhq/boundary_Young.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan_ffhq/boundary_Bald.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan_ffhq/boundary_Eyeglasses.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan_ffhq/boundary_Hat.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan_ffhq/boundary_Mustache.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan_ffhq/boundary_Smiling.npy ADDED
Binary file (2.18 kB).
boundaries/stylegan_ffhq/boundary_Young.npy ADDED
Binary file (2.18 kB).
dnnlib/__init__.py ADDED
@@ -0,0 +1,9 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ from .util import EasyDict, make_cache_dir_path
dnnlib/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (229 Bytes).
dnnlib/__pycache__/util.cpython-38.pyc ADDED
Binary file (14.1 kB).
dnnlib/util.py ADDED
@@ -0,0 +1,491 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ """Miscellaneous utility classes and functions."""
+
+ import ctypes
+ import fnmatch
+ import importlib
+ import inspect
+ import numpy as np
+ import os
+ import shutil
+ import sys
+ import types
+ import io
+ import pickle
+ import re
+ import requests
+ import html
+ import hashlib
+ import glob
+ import tempfile
+ import urllib
+ import urllib.request
+ import uuid
+
+ from distutils.util import strtobool
+ from typing import Any, List, Tuple, Union
+
+
+ # Util classes
+ # ------------------------------------------------------------------------------------------
+
+
+ class EasyDict(dict):
+     """Convenience class that behaves like a dict but allows access with the attribute syntax."""
+
+     def __getattr__(self, name: str) -> Any:
+         try:
+             return self[name]
+         except KeyError:
+             raise AttributeError(name)
+
+     def __setattr__(self, name: str, value: Any) -> None:
+         self[name] = value
+
+     def __delattr__(self, name: str) -> None:
+         del self[name]
+
+
+ class Logger(object):
+     """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
+
+     def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
+         self.file = None
+
+         if file_name is not None:
+             self.file = open(file_name, file_mode)
+
+         self.should_flush = should_flush
+         self.stdout = sys.stdout
+         self.stderr = sys.stderr
+
+         sys.stdout = self
+         sys.stderr = self
+
+     def __enter__(self) -> "Logger":
+         return self
+
+     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+         self.close()
+
+     def write(self, text: Union[str, bytes]) -> None:
+         """Write text to stdout (and a file) and optionally flush."""
+         if isinstance(text, bytes):
+             text = text.decode()
+         if len(text) == 0:  # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
+             return
+
+         if self.file is not None:
+             self.file.write(text)
+
+         self.stdout.write(text)
+
+         if self.should_flush:
+             self.flush()
+
+     def flush(self) -> None:
+         """Flush written text to both stdout and a file, if open."""
+         if self.file is not None:
+             self.file.flush()
+
+         self.stdout.flush()
+
+     def close(self) -> None:
+         """Flush, close possible files, and remove stdout/stderr mirroring."""
+         self.flush()
+
+         # if using multiple loggers, prevent closing in wrong order
+         if sys.stdout is self:
+             sys.stdout = self.stdout
+         if sys.stderr is self:
+             sys.stderr = self.stderr
+
+         if self.file is not None:
+             self.file.close()
+             self.file = None
+
+
+ # Cache directories
+ # ------------------------------------------------------------------------------------------
+
+ _dnnlib_cache_dir = None
+
+ def set_cache_dir(path: str) -> None:
+     global _dnnlib_cache_dir
+     _dnnlib_cache_dir = path
+
+ def make_cache_dir_path(*paths: str) -> str:
+     if _dnnlib_cache_dir is not None:
+         return os.path.join(_dnnlib_cache_dir, *paths)
+     if 'DNNLIB_CACHE_DIR' in os.environ:
+         return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
+     if 'HOME' in os.environ:
+         return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
+     if 'USERPROFILE' in os.environ:
+         return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
+     return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
+
+ # Small util functions
+ # ------------------------------------------------------------------------------------------
+
+
+ def format_time(seconds: Union[int, float]) -> str:
+     """Convert the seconds to human readable string with days, hours, minutes and seconds."""
+     s = int(np.rint(seconds))
+
+     if s < 60:
+         return "{0}s".format(s)
+     elif s < 60 * 60:
+         return "{0}m {1:02}s".format(s // 60, s % 60)
+     elif s < 24 * 60 * 60:
+         return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
+     else:
+         return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
+
+
+ def format_time_brief(seconds: Union[int, float]) -> str:
+     """Convert the seconds to human readable string with days, hours, minutes and seconds."""
+     s = int(np.rint(seconds))
+
+     if s < 60:
+         return "{0}s".format(s)
+     elif s < 60 * 60:
+         return "{0}m {1:02}s".format(s // 60, s % 60)
+     elif s < 24 * 60 * 60:
+         return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60)
+     else:
+         return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24)
+
+
+ def ask_yes_no(question: str) -> bool:
+     """Ask the user the question until the user inputs a valid answer."""
+     while True:
+         try:
+             print("{0} [y/n]".format(question))
+             return strtobool(input().lower())
+         except ValueError:
+             pass
+
+
+ def tuple_product(t: Tuple) -> Any:
+     """Calculate the product of the tuple elements."""
+     result = 1
+
+     for v in t:
+         result *= v
+
+     return result
+
+
+ _str_to_ctype = {
+     "uint8": ctypes.c_ubyte,
+     "uint16": ctypes.c_uint16,
+     "uint32": ctypes.c_uint32,
+     "uint64": ctypes.c_uint64,
+     "int8": ctypes.c_byte,
+     "int16": ctypes.c_int16,
+     "int32": ctypes.c_int32,
+     "int64": ctypes.c_int64,
+     "float32": ctypes.c_float,
+     "float64": ctypes.c_double
+ }
+
+
+ def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
+     """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
+     type_str = None
+
+     if isinstance(type_obj, str):
+         type_str = type_obj
+     elif hasattr(type_obj, "__name__"):
+         type_str = type_obj.__name__
+     elif hasattr(type_obj, "name"):
+         type_str = type_obj.name
+     else:
+         raise RuntimeError("Cannot infer type name from input")
+
+     assert type_str in _str_to_ctype.keys()
+
+     my_dtype = np.dtype(type_str)
+     my_ctype = _str_to_ctype[type_str]
+
+     assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
+
+     return my_dtype, my_ctype
+
+
+ def is_pickleable(obj: Any) -> bool:
+     try:
+         with io.BytesIO() as stream:
+             pickle.dump(obj, stream)
+         return True
+     except:
+         return False
+
+
+ # Functionality to import modules/objects by name, and call functions by name
+ # ------------------------------------------------------------------------------------------
+
+ def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
+     """Searches for the underlying module behind the name to some python object.
+     Returns the module and the object name (original name with module part removed)."""
+
+     # allow convenience shorthands, substitute them by full names
+     obj_name = re.sub("^np.", "numpy.", obj_name)
+     obj_name = re.sub("^tf.", "tensorflow.", obj_name)
+
+     # list alternatives for (module_name, local_obj_name)
+     parts = obj_name.split(".")
+     name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
+
+     # try each alternative in turn
+     for module_name, local_obj_name in name_pairs:
+         try:
+             module = importlib.import_module(module_name)  # may raise ImportError
+             get_obj_from_module(module, local_obj_name)  # may raise AttributeError
+             return module, local_obj_name
+         except:
+             pass
+
+     # maybe some of the modules themselves contain errors?
+     for module_name, _local_obj_name in name_pairs:
+         try:
+             importlib.import_module(module_name)  # may raise ImportError
+         except ImportError:
+             if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
+                 raise
+
+     # maybe the requested attribute is missing?
+     for module_name, local_obj_name in name_pairs:
+         try:
+             module = importlib.import_module(module_name)  # may raise ImportError
+             get_obj_from_module(module, local_obj_name)  # may raise AttributeError
+         except ImportError:
+             pass
+
+     # we are out of luck, but we have no idea why
+     raise ImportError(obj_name)
+
+
+ def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
+     """Traverses the object name and returns the last (rightmost) python object."""
+     if obj_name == '':
+         return module
+     obj = module
+     for part in obj_name.split("."):
+         obj = getattr(obj, part)
+     return obj
+
+
+ def get_obj_by_name(name: str) -> Any:
+     """Finds the python object with the given name."""
+     module, obj_name = get_module_from_obj_name(name)
+     return get_obj_from_module(module, obj_name)
+
+
+ def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
+     """Finds the python object with the given name and calls it as a function."""
+     assert func_name is not None
+     func_obj = get_obj_by_name(func_name)
+     assert callable(func_obj)
+     return func_obj(*args, **kwargs)
+
+
+ def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
+     """Finds the python class with the given name and constructs it with the given arguments."""
+     return call_func_by_name(*args, func_name=class_name, **kwargs)
+
+
+ def get_module_dir_by_obj_name(obj_name: str) -> str:
+     """Get the directory path of the module containing the given object name."""
+     module, _ = get_module_from_obj_name(obj_name)
+     return os.path.dirname(inspect.getfile(module))
+
+
+ def is_top_level_function(obj: Any) -> bool:
+     """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
+     return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
+
+
+ def get_top_level_function_name(obj: Any) -> str:
+     """Return the fully-qualified name of a top-level function."""
+     assert is_top_level_function(obj)
+     module = obj.__module__
+     if module == '__main__':
+         module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
+     return module + "." + obj.__name__
+
+
+ # File system helpers
+ # ------------------------------------------------------------------------------------------
+
+ def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
+     """List all files recursively in a given directory while ignoring given file and directory names.
+     Returns list of tuples containing both absolute and relative paths."""
+     assert os.path.isdir(dir_path)
+     base_name = os.path.basename(os.path.normpath(dir_path))
+
+     if ignores is None:
+         ignores = []
+
+     result = []
+
+     for root, dirs, files in os.walk(dir_path, topdown=True):
+         for ignore_ in ignores:
+             dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
+
+             # dirs need to be edited in-place
+             for d in dirs_to_remove:
+                 dirs.remove(d)
+
+             files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
+
+         absolute_paths = [os.path.join(root, f) for f in files]
+         relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
+
+         if add_base_to_relative:
+             relative_paths = [os.path.join(base_name, p) for p in relative_paths]
+
+         assert len(absolute_paths) == len(relative_paths)
+         result += zip(absolute_paths, relative_paths)
+
+     return result
+
+
+ def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
+     """Takes in a list of tuples of (src, dst) paths and copies files.
+     Will create all necessary directories."""
+     for file in files:
+         target_dir_name = os.path.dirname(file[1])
+
+         # will create all intermediate-level directories
+         if not os.path.exists(target_dir_name):
+             os.makedirs(target_dir_name)
+
+         shutil.copyfile(file[0], file[1])
+
+
+ # URL helpers
+ # ------------------------------------------------------------------------------------------
+
+ def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
+     """Determine whether the given object is a valid URL string."""
+     if not isinstance(obj, str) or not "://" in obj:
+         return False
+     if allow_file_urls and obj.startswith('file://'):
+         return True
+     try:
+         res = requests.compat.urlparse(obj)
+         if not res.scheme or not res.netloc or not "." in res.netloc:
+             return False
+         res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
+         if not res.scheme or not res.netloc or not "." in res.netloc:
+             return False
+     except:
+         return False
+     return True
+
+
+ def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
+     """Download the given URL and return a binary-mode file object to access the data."""
+     assert num_attempts >= 1
+     assert not (return_filename and (not cache))
+
+     # Doesn't look like an URL scheme so interpret it as a local filename.
+     if not re.match('^[a-z]+://', url):
+         return url if return_filename else open(url, "rb")
+
+     # Handle file URLs. This code handles unusual file:// patterns that
+     # arise on Windows:
+     #
+     # file:///c:/foo.txt
+     #
+     # which would translate to a local '/c:/foo.txt' filename that's
+     # invalid. Drop the forward slash for such pathnames.
+     #
+     # If you touch this code path, you should test it on both Linux and
+     # Windows.
+     #
+     # Some internet resources suggest using urllib.request.url2pathname(),
+     # but that converts forward slashes to backslashes and this causes
+     # its own set of problems.
+     if url.startswith('file://'):
+         filename = urllib.parse.urlparse(url).path
+         if re.match(r'^/[a-zA-Z]:', filename):
+             filename = filename[1:]
+         return filename if return_filename else open(filename, "rb")
+
+     assert is_url(url)
+
+     # Lookup from cache.
+     if cache_dir is None:
+         cache_dir = make_cache_dir_path('downloads')
+
+     url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
+     if cache:
+         cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
+         if len(cache_files) == 1:
+             filename = cache_files[0]
+             return filename if return_filename else open(filename, "rb")
+
+     # Download.
+     url_name = None
+     url_data = None
+     with requests.Session() as session:
+         if verbose:
+             print("Downloading %s ..." % url, end="", flush=True)
+         for attempts_left in reversed(range(num_attempts)):
+             try:
+                 with session.get(url) as res:
+                     res.raise_for_status()
+                     if len(res.content) == 0:
+                         raise IOError("No data received")
+
+                     if len(res.content) < 8192:
+                         content_str = res.content.decode("utf-8")
+                         if "download_warning" in res.headers.get("Set-Cookie", ""):
+                             links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
+                             if len(links) == 1:
+                                 url = requests.compat.urljoin(url, links[0])
+                                 raise IOError("Google Drive virus checker nag")
+                         if "Google Drive - Quota exceeded" in content_str:
+                             raise IOError("Google Drive download quota exceeded -- please try again later")
+
+                     match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
+                     url_name = match[1] if match else url
+                     url_data = res.content
+                     if verbose:
+                         print(" done")
+                     break
+             except KeyboardInterrupt:
+                 raise
+             except:
+                 if not attempts_left:
+                     if verbose:
+                         print(" failed")
+                     raise
+                 if verbose:
+                     print(".", end="", flush=True)
+
+     # Save to cache.
+     if cache:
+         safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
+         cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
+         temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
+         os.makedirs(cache_dir, exist_ok=True)
+         with open(temp_file, "wb") as f:
+             f.write(url_data)
+         os.replace(temp_file, cache_file)  # atomic
+         if return_filename:
+             return cache_file
+
+     # Return data as file object.
+     assert not return_filename
+     return io.BytesIO(url_data)
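As a quick illustration of the two utilities re-exported from `dnnlib/__init__.py`, a minimal sketch:

import dnnlib

cfg = dnnlib.EasyDict(resolution=1024, truncation_psi=0.7)
cfg.seed = 264                       # attribute-style writes land in the dict
print(cfg['seed'], cfg.resolution)   # 264 1024

# Resolves to the directory set via set_cache_dir() if any, then
# $DNNLIB_CACHE_DIR, then ~/.cache/dnnlib (or the Windows equivalent),
# and finally the system temp directory.
print(dnnlib.make_cache_dir_path('downloads'))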
models/__init__.py ADDED
File without changes
models/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (162 Bytes).
models/__pycache__/base_generator.cpython-38.pyc ADDED
Binary file (8.5 kB).
models/__pycache__/model_settings.cpython-38.pyc ADDED
Binary file (1.99 kB).
models/__pycache__/pggan_generator.cpython-38.pyc ADDED
Binary file (5.04 kB).
models/__pycache__/pggan_generator_model.cpython-38.pyc ADDED
Binary file (11 kB).
models/__pycache__/stylegan2_generator.cpython-38.pyc ADDED
Binary file (6.72 kB).
models/__pycache__/stylegan3_generator.cpython-38.pyc ADDED
Binary file (6.73 kB).
models/__pycache__/stylegan3_official_network.cpython-38.pyc ADDED
Binary file (14.6 kB).
models/__pycache__/stylegan_generator.cpython-38.pyc ADDED
Binary file (9.4 kB).
models/__pycache__/stylegan_generator_model.cpython-38.pyc ADDED
Binary file (31.3 kB).
models/base_generator.py ADDED
@@ -0,0 +1,248 @@
+ # python3.7
+ """Contains the base class for generator."""
+
+ import os
+ import sys
+ import logging
+ import numpy as np
+
+ import torch
+
+ from . import model_settings
+
+ __all__ = ['BaseGenerator']
+
+
+ def get_temp_logger(logger_name='logger'):
+   """Gets a temporary logger.
+
+   This logger will print all levels of messages onto the screen.
+
+   Args:
+     logger_name: Name of the logger.
+
+   Returns:
+     A `logging.Logger`.
+
+   Raises:
+     ValueError: If the input `logger_name` is empty.
+   """
+   if not logger_name:
+     raise ValueError('Input `logger_name` should not be empty!')
+
+   logger = logging.getLogger(logger_name)
+   if not logger.hasHandlers():
+     logger.setLevel(logging.DEBUG)
+     formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s")
+     sh = logging.StreamHandler(stream=sys.stdout)
+     sh.setLevel(logging.DEBUG)
+     sh.setFormatter(formatter)
+     logger.addHandler(sh)
+
+   return logger
+
+
+ class BaseGenerator(object):
+   """Base class for generator used in GAN variants.
+
+   NOTE: The model should be defined with pytorch, and is only used for
+   inference.
+   """
+
+   def __init__(self, model_name, logger=None):
+     """Initializes with specific settings.
+
+     The model should be registered in `model_settings.py` with proper settings
+     first. Among them, some attributes are necessary, including:
+     (1) gan_type: Type of the GAN model.
+     (2) latent_space_dim: Dimension of the latent space.
+     (3) resolution: Resolution of the synthesis.
+     (4) min_val: Minimum value of the raw output. (default -1.0)
+     (5) max_val: Maximum value of the raw output. (default 1.0)
+     (6) channel_order: Channel order of the output image. (default: `RGB`)
+
+     Args:
+       model_name: Name with which the model is registered.
+       logger: Logger for recording log messages. If set as `None`, a default
+         logger, which prints messages from all levels to screen, will be
+         created. (default: None)
+
+     Raises:
+       AttributeError: If some necessary attributes are missing.
+     """
+     self.model_name = model_name
+     for key, val in model_settings.MODEL_POOL[model_name].items():
+       setattr(self, key, val)
+     self.use_cuda = model_settings.USE_CUDA
+     self.batch_size = model_settings.MAX_IMAGES_ON_DEVICE
+     self.logger = logger or get_temp_logger(model_name + '_generator')
+     self.model = None
+     self.run_device = 'cuda' if self.use_cuda else 'cpu'
+     self.cpu_device = 'cpu'
+
+     # Check necessary settings.
+     self.check_attr('gan_type')
+     self.check_attr('latent_space_dim')
+     self.check_attr('resolution')
+     self.min_val = getattr(self, 'min_val', -1.0)
+     self.max_val = getattr(self, 'max_val', 1.0)
+     self.output_channels = getattr(self, 'output_channels', 3)
+     self.channel_order = getattr(self, 'channel_order', 'RGB').upper()
+     assert self.channel_order in ['RGB', 'BGR']
+
+     # Build model and load pre-trained weights.
+     self.build()
+     if os.path.isfile(getattr(self, 'model_path', '')):
+       self.load()
+     elif os.path.isfile(getattr(self, 'tf_model_path', '')):
+       self.convert_tf_model()
+     else:
+       self.logger.warning('No pre-trained model will be loaded!')
+
+     # Change to inference mode and GPU mode if needed.
+     assert self.model
+     self.model.eval().to(self.run_device)
+
+   def check_attr(self, attr_name):
+     """Checks the existence of a particular attribute.
+
+     Args:
+       attr_name: Name of the attribute to check.
+
+     Raises:
+       AttributeError: If the target attribute is missing.
+     """
+     if not hasattr(self, attr_name):
+       raise AttributeError(
+           f'`{attr_name}` is missing for model `{self.model_name}`!')
+
+   def build(self):
+     """Builds the graph."""
+     raise NotImplementedError('Should be implemented in derived class!')
+
+   def load(self):
+     """Loads pre-trained weights."""
+     raise NotImplementedError('Should be implemented in derived class!')
+
+   def convert_tf_model(self, test_num=10):
+     """Converts model weights from the tensorflow version.
+
+     Args:
+       test_num: Number of images to generate for testing whether the
+         conversion is done correctly. `0` means skipping the test.
+         (default 10)
+     """
+     raise NotImplementedError('Should be implemented in derived class!')
+
+   def sample(self, num):
+     """Samples latent codes randomly.
+
+     Args:
+       num: Number of latent codes to sample. Should be positive.
+
+     Returns:
+       A `numpy.ndarray` of sampled latent codes.
+     """
+     raise NotImplementedError('Should be implemented in derived class!')
+
+   def preprocess(self, latent_codes):
+     """Preprocesses the input latent codes if needed.
+
+     Args:
+       latent_codes: The input latent codes for preprocessing.
+
+     Returns:
+       The preprocessed latent codes, which can be used as final input for the
+       generator.
+     """
+     raise NotImplementedError('Should be implemented in derived class!')
+
+   def easy_sample(self, num):
+     """Wraps functions `sample()` and `preprocess()` together."""
+     return self.preprocess(self.sample(num))
+
+   def synthesize(self, latent_codes):
+     """Synthesizes images with given latent codes.
+
+     NOTE: The latent codes should have already been preprocessed.
+
+     Args:
+       latent_codes: Input latent codes for image synthesis.
+
+     Returns:
+       A dictionary whose values are raw outputs from the generator.
+     """
+     raise NotImplementedError('Should be implemented in derived class!')
+
+   def get_value(self, tensor):
+     """Gets value of a `torch.Tensor`.
+
+     Args:
+       tensor: The input tensor to get value from.
+
+     Returns:
+       A `numpy.ndarray`.
+
+     Raises:
+       ValueError: If the tensor is of neither `torch.Tensor` type nor
+         `numpy.ndarray` type.
+     """
+     if isinstance(tensor, np.ndarray):
+       return tensor
+     if isinstance(tensor, torch.Tensor):
+       return tensor.to(self.cpu_device).detach().numpy()
+     raise ValueError(f'Unsupported input type `{type(tensor)}`!')
+
+   def postprocess(self, images):
+     """Postprocesses the output images if needed.
+
+     This function assumes the input numpy array is with shape [batch_size,
+     channel, height, width]. Here, `channel = 3` for color image and
+     `channel = 1` for grayscale image. The returned images are with shape
+     [batch_size, height, width, channel]. NOTE: The channel order of the
+     output images will always be `RGB`.
+
+     Args:
+       images: The raw output from the generator.
+
+     Returns:
+       The postprocessed images with dtype `numpy.uint8` and range [0, 255].
+
+     Raises:
+       ValueError: If the input `images` are not with type `numpy.ndarray` or
+         not with shape [batch_size, channel, height, width].
+     """
+     if not isinstance(images, np.ndarray):
+       raise ValueError('Images should be with type `numpy.ndarray`!')
+     if ('stylegan3' not in self.model_name) and ('stylegan2' not in self.model_name):
+       images_shape = images.shape
+       if len(images_shape) != 4 or images_shape[1] not in [1, 3]:
+         raise ValueError(f'Input should be with shape [batch_size, channel, '
+                          f'height, width], where channel equals to 1 or 3. '
+                          f'But {images_shape} is received!')
+     images = (images - self.min_val) * 255 / (self.max_val - self.min_val)
+     images = np.clip(images + 0.5, 0, 255).astype(np.uint8)
+     images = images.transpose(0, 2, 3, 1)
+     if self.channel_order == 'BGR':
+       images = images[:, :, :, ::-1]
+
+     return images
+
+   def easy_synthesize(self, latent_codes, **kwargs):
+     """Wraps functions `synthesize()` and `postprocess()` together."""
+     outputs = self.synthesize(latent_codes, **kwargs)
+     if 'image' in outputs:
+       outputs['image'] = self.postprocess(outputs['image'])
+
+     return outputs
+
+   def get_batch_inputs(self, latent_codes):
+     """Gets batch inputs from a collection of latent codes.
+
+     This function will yield at most `self.batch_size` latent_codes at a time.
+
+     Args:
+       latent_codes: The input latent codes for generation. First dimension
+         should be the total number.
+     """
+     total_num = latent_codes.shape[0]
+     for i in range(0, total_num, self.batch_size):
+       yield latent_codes[i:i + self.batch_size]
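A sketch of how a concrete subclass is typically driven, using `get_batch_inputs()` to respect the device batch limit (it assumes the `StyleGANGenerator` from this commit and a checkpoint under `models/pretrain`):

import numpy as np
from models.stylegan_generator import StyleGANGenerator

generator = StyleGANGenerator('stylegan_ffhq')   # settings come from MODEL_POOL
codes = generator.easy_sample(32)                # (32, 512) preprocessed codes
chunks = []
for batch in generator.get_batch_inputs(codes):  # <= MAX_IMAGES_ON_DEVICE per step
    outputs = generator.easy_synthesize(batch)
    chunks.append(outputs['image'])              # uint8, [batch, H, W, 3], RGB
images = np.concatenate(chunks, axis=0)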
models/model_settings.py ADDED
@@ -0,0 +1,102 @@
+ # python3.7
+ """Contains basic configurations for models used in this project.
+
+ Please download the publicly released models from the following repositories
+ OR train your own models, and then put them into the `pretrain` folder.
+
+ ProgressiveGAN: https://github.com/tkarras/progressive_growing_of_gans
+ StyleGAN: https://github.com/NVlabs/stylegan
+ StyleGAN2: https://github.com/NVlabs/stylegan2
+
+ NOTE: Any new model should be registered in `MODEL_POOL` before using.
+ """
+
+ import os.path
+
+ BASE_DIR = os.path.dirname(os.path.relpath(__file__))
+
+ MODEL_DIR = BASE_DIR + '/pretrain'
+
+ MODEL_POOL = {
+     'pggan_celebahq': {
+         'tf_model_path': MODEL_DIR + '/karras2018iclr-celebahq-1024x1024.pkl',
+         'model_path': MODEL_DIR + '/pggan_celebahq.pth',
+         'gan_type': 'pggan',
+         'dataset_name': 'celebahq',
+         'latent_space_dim': 512,
+         'resolution': 1024,
+         'min_val': -1.0,
+         'max_val': 1.0,
+         'output_channels': 3,
+         'channel_order': 'RGB',
+         'fused_scale': False,
+     },
+     'stylegan_celebahq': {
+         'tf_model_path': MODEL_DIR + '/karras2019stylegan-celebahq-1024x1024.pkl',
+         'model_path': MODEL_DIR + '/stylegan_celebahq.pth',
+         'gan_type': 'stylegan',
+         'dataset_name': 'celebahq',
+         'latent_space_dim': 512,
+         'w_space_dim': 512,
+         'resolution': 1024,
+         'min_val': -1.0,
+         'max_val': 1.0,
+         'output_channels': 3,
+         'channel_order': 'RGB',
+         'fused_scale': 'auto',
+     },
+     'stylegan_ffhq': {
+         'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl',
+         'model_path': MODEL_DIR + '/stylegan_ffhq.pth',
+         'gan_type': 'stylegan',
+         'dataset_name': 'ffhq',
+         'latent_space_dim': 512,
+         'w_space_dim': 512,
+         'resolution': 1024,
+         'min_val': -1.0,
+         'max_val': 1.0,
+         'output_channels': 3,
+         'channel_order': 'RGB',
+         'fused_scale': 'auto',
+     },
+     'stylegan2_ffhq': {
+         'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl',
+         'model_path': MODEL_DIR + '/stylegan2-ffhq-1024x1024.pkl',
+         'gan_type': 'stylegan2',
+         'dataset_name': 'ffhq',
+         'latent_space_dim': 512,
+         'w_space_dim': 512,
+         'c_space_dim': 512,
+         'resolution': 1024,
+         'min_val': -1.0,
+         'max_val': 1.0,
+         'output_channels': 3,
+         'channel_order': 'RGB',
+         'fused_scale': 'auto',
+     },
+     'stylegan3_ffhq': {
+         'model_path': MODEL_DIR + '/stylegan3-t-ffhq-1024x1024.pkl',
+         'gan_type': 'stylegan3',
+         'dataset_name': 'ffhq',
+         'latent_space_dim': 512,
+         'w_space_dim': 512,
+         'c_space_dim': 512,
+         'resolution': 1024,
+         'min_val': -1.0,
+         'max_val': 1.0,
+         'output_channels': 3,
+         'channel_order': 'RGB',
+         'fused_scale': 'auto',
+     },
+ }
+
+ # Settings for StyleGAN.
+ STYLEGAN_TRUNCATION_PSI = 0.7  # 1.0 means no truncation
+ STYLEGAN_TRUNCATION_LAYERS = 8  # 0 means no truncation
+ STYLEGAN_RANDOMIZE_NOISE = False
+
+ # Settings for model running.
+ USE_CUDA = False
+
+ MAX_IMAGES_ON_DEVICE = 8
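Registering an additional checkpoint is just another `MODEL_POOL` entry; a hypothetical example (the name, path, and resolution below are illustrative, not part of this commit):

MODEL_POOL['stylegan_custom'] = {
    'model_path': MODEL_DIR + '/stylegan_custom.pth',  # hypothetical weights
    'gan_type': 'stylegan',
    'dataset_name': 'custom',
    'latent_space_dim': 512,
    'w_space_dim': 512,
    'resolution': 256,
    'min_val': -1.0,
    'max_val': 1.0,
    'output_channels': 3,
    'channel_order': 'RGB',
    'fused_scale': 'auto',
}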
models/pggan_generator.py ADDED
@@ -0,0 +1,133 @@
+ # python3.7
+ """Contains the generator class of ProgressiveGAN.
+
+ Basically, this class is derived from the `BaseGenerator` class defined in
+ `base_generator.py`.
+ """
+
+ import os
+ import numpy as np
+
+ import torch
+
+ from . import model_settings
+ from .pggan_generator_model import PGGANGeneratorModel
+ from .base_generator import BaseGenerator
+
+ __all__ = ['PGGANGenerator']
+
+
+ class PGGANGenerator(BaseGenerator):
+   """Defines the generator class of ProgressiveGAN."""
+
+   def __init__(self, model_name, logger=None):
+     super().__init__(model_name, logger)
+     assert self.gan_type == 'pggan'
+
+   def build(self):
+     self.check_attr('fused_scale')
+     self.model = PGGANGeneratorModel(resolution=self.resolution,
+                                      fused_scale=self.fused_scale,
+                                      output_channels=self.output_channels)
+
+   def load(self):
+     self.logger.info(f'Loading pytorch model from `{self.model_path}`.')
+     self.model.load_state_dict(torch.load(self.model_path))
+     self.logger.info('Successfully loaded!')
+     self.lod = self.model.lod.to(self.cpu_device).tolist()
+     self.logger.info(f'  `lod` of the loaded model is {self.lod}.')
+
+   def convert_tf_model(self, test_num=10):
+     import sys
+     import pickle
+     import tensorflow as tf
+     os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+     sys.path.append(model_settings.BASE_DIR + '/pggan_tf_official')
+
+     self.logger.info(f'Loading tensorflow model from `{self.tf_model_path}`.')
+     tf.InteractiveSession()
+     with open(self.tf_model_path, 'rb') as f:
+       _, _, tf_model = pickle.load(f)
+     self.logger.info('Successfully loaded!')
+
+     self.logger.info('Converting tensorflow model to pytorch version.')
+     tf_vars = dict(tf_model.__getstate__()['variables'])
+     state_dict = self.model.state_dict()
+     for pth_var_name, tf_var_name in self.model.pth_to_tf_var_mapping.items():
+       if 'ToRGB_lod' in tf_var_name:
+         lod = int(tf_var_name[len('ToRGB_lod')])
+         lod_shift = 10 - int(np.log2(self.resolution))
+         tf_var_name = tf_var_name.replace(f'{lod}', f'{lod - lod_shift}')
+       if tf_var_name not in tf_vars:
+         self.logger.debug(f'Variable `{tf_var_name}` does not exist in '
+                           f'tensorflow model.')
+         continue
+       self.logger.debug(f'  Converting `{tf_var_name}` to `{pth_var_name}`.')
+       var = torch.from_numpy(np.array(tf_vars[tf_var_name]))
+       if 'weight' in pth_var_name:
+         if 'layer0.conv' in pth_var_name:
+           var = var.view(var.shape[0], -1, 4, 4).permute(1, 0, 2, 3).flip(2, 3)
+         elif 'Conv0_up' in tf_var_name:
+           var = var.permute(0, 1, 3, 2)
+         else:
+           var = var.permute(3, 2, 0, 1)
+       state_dict[pth_var_name] = var
+     self.logger.info('Successfully converted!')
+
+     self.logger.info(f'Saving pytorch model to `{self.model_path}`.')
+     torch.save(state_dict, self.model_path)
+     self.logger.info('Successfully saved!')
+
+     self.load()
+
+     # Official tensorflow model can only run on GPU.
+     if test_num <= 0 or not tf.test.is_built_with_cuda():
+       return
+     self.logger.info('Testing conversion results.')
+     self.model.eval().to(self.run_device)
+     label_dim = tf_model.input_shapes[1][1]
+     tf_fake_label = np.zeros((1, label_dim), np.float32)
+     total_distance = 0.0
+     for i in range(test_num):
+       latent_code = self.easy_sample(1)
+       tf_output = tf_model.run(latent_code, tf_fake_label)
+       pth_output = self.synthesize(latent_code)['image']
+       distance = np.average(np.abs(tf_output - pth_output))
+       self.logger.debug(f'  Test {i:03d}: distance {distance:.6e}.')
+       total_distance += distance
+     self.logger.info(f'Average distance is {total_distance / test_num:.6e}.')
+
+   def sample(self, num):
+     assert num > 0
+     return np.random.randn(num, self.latent_space_dim).astype(np.float32)
+
+   def preprocess(self, latent_codes):
+     if not isinstance(latent_codes, np.ndarray):
+       raise ValueError('Latent codes should be with type `numpy.ndarray`!')
+
+     latent_codes = latent_codes.reshape(-1, self.latent_space_dim)
+     norm = np.linalg.norm(latent_codes, axis=1, keepdims=True)
+     latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim)
+     return latent_codes.astype(np.float32)
+
+   def synthesize(self, latent_codes):
+     if not isinstance(latent_codes, np.ndarray):
+       raise ValueError('Latent codes should be with type `numpy.ndarray`!')
+     latent_codes_shape = latent_codes.shape
+     if not (len(latent_codes_shape) == 2 and
+             latent_codes_shape[0] <= self.batch_size and
+             latent_codes_shape[1] == self.latent_space_dim):
+       raise ValueError(f'Latent codes should be with shape [batch_size, '
+                        f'latent_space_dim], where `batch_size` is no larger '
+                        f'than {self.batch_size}, and `latent_space_dim` is '
+                        f'equal to {self.latent_space_dim}!\n'
+                        f'But {latent_codes_shape} is received!')
+
+     zs = torch.from_numpy(latent_codes).type(torch.FloatTensor)
+     zs = zs.to(self.run_device)
+     images = self.model(zs)
+     results = {
+         'z': latent_codes,
+         'image': self.get_value(images),
+     }
+     return results
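End-to-end use follows the `BaseGenerator` contract; a sketch, assuming the converted `pggan_celebahq.pth` checkpoint is present under `models/pretrain`:

from models.pggan_generator import PGGANGenerator

generator = PGGANGenerator('pggan_celebahq')
codes = generator.easy_sample(2)     # sampled, then normalized to radius sqrt(512)
outputs = generator.easy_synthesize(codes)
print(outputs['image'].shape)        # (2, 1024, 1024, 3), dtype uint8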
models/pggan_generator_model.py ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # python3.7
2
+ """Contains the implementation of generator described in ProgressiveGAN.
3
+
4
+ Different from the official tensorflow model in folder `pggan_tf_official`, this
5
+ is a simple pytorch version which only contains the generator part. This class
6
+ is specially used for inference.
7
+
8
+ For more details, please check the original paper:
9
+ https://arxiv.org/pdf/1710.10196.pdf
10
+ """
11
+
12
+ import numpy as np
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+ import torch.nn.functional as F
17
+
18
+ __all__ = ['PGGANGeneratorModel']
19
+
20
+ # Defines a dictionary, which maps the target resolution of the final generated
21
+ # image to numbers of filters used in each convolutional layer in sequence.
22
+ _RESOLUTIONS_TO_CHANNELS = {
23
+ 8: [512, 512, 512],
24
+ 16: [512, 512, 512, 512],
25
+ 32: [512, 512, 512, 512, 512],
26
+ 64: [512, 512, 512, 512, 512, 256],
27
+ 128: [512, 512, 512, 512, 512, 256, 128],
28
+ 256: [512, 512, 512, 512, 512, 256, 128, 64],
29
+ 512: [512, 512, 512, 512, 512, 256, 128, 64, 32],
30
+ 1024: [512, 512, 512, 512, 512, 256, 128, 64, 32, 16],
31
+ }
32
+
33
+ # Variable mapping from pytorch model to official tensorflow model.
34
+ _PGGAN_PTH_VARS_TO_TF_VARS = {
35
+ 'lod': 'lod', # []
36
+ 'layer0.conv.weight': '4x4/Dense/weight', # [512, 512, 4, 4]
37
+ 'layer0.wscale.bias': '4x4/Dense/bias', # [512]
38
+ 'layer1.conv.weight': '4x4/Conv/weight', # [512, 512, 3, 3]
39
+ 'layer1.wscale.bias': '4x4/Conv/bias', # [512]
40
+ 'layer2.conv.weight': '8x8/Conv0/weight', # [512, 512, 3, 3]
41
+ 'layer2.wscale.bias': '8x8/Conv0/bias', # [512]
42
+ 'layer3.conv.weight': '8x8/Conv1/weight', # [512, 512, 3, 3]
43
+ 'layer3.wscale.bias': '8x8/Conv1/bias', # [512]
44
+ 'layer4.conv.weight': '16x16/Conv0/weight', # [512, 512, 3, 3]
45
+ 'layer4.wscale.bias': '16x16/Conv0/bias', # [512]
46
+ 'layer5.conv.weight': '16x16/Conv1/weight', # [512, 512, 3, 3]
47
+ 'layer5.wscale.bias': '16x16/Conv1/bias', # [512]
48
+ 'layer6.conv.weight': '32x32/Conv0/weight', # [512, 512, 3, 3]
49
+ 'layer6.wscale.bias': '32x32/Conv0/bias', # [512]
50
+ 'layer7.conv.weight': '32x32/Conv1/weight', # [512, 512, 3, 3]
51
+ 'layer7.wscale.bias': '32x32/Conv1/bias', # [512]
52
+ 'layer8.conv.weight': '64x64/Conv0/weight', # [256, 512, 3, 3]
53
+ 'layer8.wscale.bias': '64x64/Conv0/bias', # [256]
54
+ 'layer9.conv.weight': '64x64/Conv1/weight', # [256, 256, 3, 3]
55
+ 'layer9.wscale.bias': '64x64/Conv1/bias', # [256]
56
+ 'layer10.conv.weight': '128x128/Conv0/weight', # [128, 256, 3, 3]
57
+ 'layer10.wscale.bias': '128x128/Conv0/bias', # [128]
58
+ 'layer11.conv.weight': '128x128/Conv1/weight', # [128, 128, 3, 3]
59
+ 'layer11.wscale.bias': '128x128/Conv1/bias', # [128]
60
+ 'layer12.conv.weight': '256x256/Conv0/weight', # [64, 128, 3, 3]
61
+ 'layer12.wscale.bias': '256x256/Conv0/bias', # [64]
62
+ 'layer13.conv.weight': '256x256/Conv1/weight', # [64, 64, 3, 3]
63
+ 'layer13.wscale.bias': '256x256/Conv1/bias', # [64]
64
+ 'layer14.conv.weight': '512x512/Conv0/weight', # [32, 64, 3, 3]
65
+ 'layer14.wscale.bias': '512x512/Conv0/bias', # [32]
66
+ 'layer15.conv.weight': '512x512/Conv1/weight', # [32, 32, 3, 3]
67
+ 'layer15.wscale.bias': '512x512/Conv1/bias', # [32]
68
+ 'layer16.conv.weight': '1024x1024/Conv0/weight', # [16, 32, 3, 3]
69
+ 'layer16.wscale.bias': '1024x1024/Conv0/bias', # [16]
70
+ 'layer17.conv.weight': '1024x1024/Conv1/weight', # [16, 16, 3, 3]
71
+ 'layer17.wscale.bias': '1024x1024/Conv1/bias', # [16]
72
+ 'output0.conv.weight': 'ToRGB_lod8/weight', # [3, 512, 1, 1]
73
+ 'output0.wscale.bias': 'ToRGB_lod8/bias', # [3]
74
+ 'output1.conv.weight': 'ToRGB_lod7/weight', # [3, 512, 1, 1]
75
+ 'output1.wscale.bias': 'ToRGB_lod7/bias', # [3]
76
+ 'output2.conv.weight': 'ToRGB_lod6/weight', # [3, 512, 1, 1]
77
+ 'output2.wscale.bias': 'ToRGB_lod6/bias', # [3]
78
+ 'output3.conv.weight': 'ToRGB_lod5/weight', # [3, 512, 1, 1]
79
+ 'output3.wscale.bias': 'ToRGB_lod5/bias', # [3]
80
+ 'output4.conv.weight': 'ToRGB_lod4/weight', # [3, 256, 1, 1]
81
+ 'output4.wscale.bias': 'ToRGB_lod4/bias', # [3]
82
+ 'output5.conv.weight': 'ToRGB_lod3/weight', # [3, 128, 1, 1]
83
+ 'output5.wscale.bias': 'ToRGB_lod3/bias', # [3]
84
+ 'output6.conv.weight': 'ToRGB_lod2/weight', # [3, 64, 1, 1]
85
+ 'output6.wscale.bias': 'ToRGB_lod2/bias', # [3]
86
+ 'output7.conv.weight': 'ToRGB_lod1/weight', # [3, 32, 1, 1]
87
+ 'output7.wscale.bias': 'ToRGB_lod1/bias', # [3]
88
+ 'output8.conv.weight': 'ToRGB_lod0/weight', # [3, 16, 1, 1]
89
+ 'output8.wscale.bias': 'ToRGB_lod0/bias', # [3]
90
+ }
91
+
92
+
93
+ class PGGANGeneratorModel(nn.Module):
94
+ """Defines the generator module in ProgressiveGAN.
95
+
96
+ Note that the generated images are with RGB color channels with range [-1, 1].
97
+ """
98
+
99
+ def __init__(self,
100
+ resolution=1024,
101
+ fused_scale=False,
102
+ output_channels=3):
103
+ """Initializes the generator with basic settings.
104
+
105
+ Args:
106
+ resolution: The resolution of the final output image. (default: 1024)
107
+ fused_scale: Whether to fused `upsample` and `conv2d` together, resulting
108
+ in `conv2_transpose`. (default: False)
109
+ output_channels: Number of channels of the output image. (default: 3)
110
+
111
+ Raises:
112
+ ValueError: If the input `resolution` is not supported.
113
+ """
114
+ super().__init__()
115
+
116
+ try:
117
+ self.channels = _RESOLUTIONS_TO_CHANNELS[resolution]
118
+ except KeyError:
119
+ raise ValueError(f'Invalid resolution: {resolution}!\n'
120
+ f'Resolutions allowed: '
121
+ f'{list(_RESOLUTIONS_TO_CHANNELS)}.')
122
+ assert len(self.channels) == int(np.log2(resolution))
123
+
124
+ self.resolution = resolution
125
+ self.fused_scale = fused_scale
126
+ self.output_channels = output_channels
127
+
128
+ for block_idx in range(1, len(self.channels)):
129
+ if block_idx == 1:
130
+ self.add_module(
131
+ f'layer{2 * block_idx - 2}',
132
+ ConvBlock(in_channels=self.channels[block_idx - 1],
133
+ out_channels=self.channels[block_idx],
134
+ kernel_size=4,
135
+ padding=3))
136
+ else:
137
+ self.add_module(
138
+ f'layer{2 * block_idx - 2}',
139
+ ConvBlock(in_channels=self.channels[block_idx - 1],
140
+ out_channels=self.channels[block_idx],
141
+ upsample=True,
142
+ fused_scale=self.fused_scale))
143
+ self.add_module(
144
+ f'layer{2 * block_idx - 1}',
145
+ ConvBlock(in_channels=self.channels[block_idx],
146
+ out_channels=self.channels[block_idx]))
147
+ self.add_module(
148
+ f'output{block_idx - 1}',
149
+ ConvBlock(in_channels=self.channels[block_idx],
150
+ out_channels=self.output_channels,
151
+ kernel_size=1,
152
+ padding=0,
153
+ wscale_gain=1.0,
154
+ activation_type='linear'))
155
+
156
+ self.upsample = ResolutionScalingLayer()
157
+ self.lod = nn.Parameter(torch.zeros(()))
158
+
159
+ self.pth_to_tf_var_mapping = {}
160
+ for pth_var_name, tf_var_name in _PGGAN_PTH_VARS_TO_TF_VARS.items():
161
+ if self.fused_scale and 'Conv0' in tf_var_name:
162
+ pth_var_name = pth_var_name.replace('conv.weight', 'weight')
163
+ tf_var_name = tf_var_name.replace('Conv0', 'Conv0_up')
164
+ self.pth_to_tf_var_mapping[pth_var_name] = tf_var_name
165
+
166
+ def forward(self, x):
167
+ if len(x.shape) != 2:
168
+ raise ValueError(f'The input tensor should be with shape [batch_size, '
169
+ f'noise_dim], but {x.shape} received!')
170
+ x = x.view(x.shape[0], x.shape[1], 1, 1)
171
+
172
+ lod = self.lod.cpu().tolist()
173
+ for block_idx in range(1, len(self.channels)):
174
+ if block_idx + lod < len(self.channels):
175
+ x = self.__getattr__(f'layer{2 * block_idx - 2}')(x)
176
+ x = self.__getattr__(f'layer{2 * block_idx - 1}')(x)
177
+ image = self.__getattr__(f'output{block_idx - 1}')(x)
178
+ else:
179
+ image = self.upsample(image)
180
+ return image
181
+
182
+
183
+ class PixelNormLayer(nn.Module):
184
+ """Implements pixel-wise feature vector normalization layer."""
185
+
186
+ def __init__(self, epsilon=1e-8):
187
+ super().__init__()
188
+ self.epsilon = epsilon
189
+
190
+ def forward(self, x):
191
+ return x / torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + self.epsilon)
192
+
193
+
194
+ class ResolutionScalingLayer(nn.Module):
195
+ """Implements the resolution scaling layer.
196
+
197
+ Basically, this layer can be used to upsample or downsample feature maps from
198
+ spatial domain with nearest neighbor interpolation.
199
+ """
200
+
201
+ def __init__(self, scale_factor=2):
202
+ super().__init__()
203
+ self.scale_factor = scale_factor
204
+
205
+ def forward(self, x):
206
+ return F.interpolate(x, scale_factor=self.scale_factor, mode='nearest')
207
+
208
+
209
+ class WScaleLayer(nn.Module):
210
+ """Implements the layer to scale weight variable and add bias.
211
+
212
+ Note that, the weight variable is trained in `nn.Conv2d` layer, and only
213
+ scaled with a constant number, which is not trainable, in this layer. However,
214
+ the bias variable is trainable in this layer.
215
+ """
216
+
217
+ def __init__(self, in_channels, out_channels, kernel_size, gain=np.sqrt(2.0)):
218
+ super().__init__()
219
+ fan_in = in_channels * kernel_size * kernel_size
220
+ self.scale = gain / np.sqrt(fan_in)
221
+ self.bias = nn.Parameter(torch.zeros(out_channels))
222
+
223
+ def forward(self, x):
224
+ return x * self.scale + self.bias.view(1, -1, 1, 1)
+
+
+ class ConvBlock(nn.Module):
+   """Implements the convolutional block used in ProgressiveGAN.
+
+   Basically, this block executes a pixel-wise normalization layer, an
+   upsampling layer (if needed), a convolutional layer, a weight-scale layer,
+   and an activation layer in sequence.
+   """
+
+   def __init__(self,
+                in_channels,
+                out_channels,
+                kernel_size=3,
+                stride=1,
+                padding=1,
+                dilation=1,
+                add_bias=False,
+                upsample=False,
+                fused_scale=False,
+                wscale_gain=np.sqrt(2.0),
+                activation_type='lrelu'):
+     """Initializes the class with block settings.
+
+     Args:
+       in_channels: Number of channels of the input tensor fed into this block.
+       out_channels: Number of channels (kernels) of the output tensor.
+       kernel_size: Size of the convolutional kernel.
+       stride: Stride parameter for the convolution operation.
+       padding: Padding parameter for the convolution operation.
+       dilation: Dilation rate for the convolution operation.
+       add_bias: Whether to add bias onto the convolutional result.
+       upsample: Whether to upsample the input tensor before convolution.
+       fused_scale: Whether to fuse `upsample` and `conv2d` together, resulting
+         in `conv_transpose2d`.
+       wscale_gain: The gain factor for the `wscale` layer.
+       activation_type: Type of activation function. Supports `linear`, `lrelu`
+         and `tanh`.
+
+     Raises:
+       NotImplementedError: If the input `activation_type` is not supported.
+     """
+     super().__init__()
+     self.pixel_norm = PixelNormLayer()
+
+     if upsample and not fused_scale:
+       self.upsample = ResolutionScalingLayer()
+     else:
+       self.upsample = nn.Identity()
+
+     if upsample and fused_scale:
+       self.weight = nn.Parameter(
+           torch.randn(kernel_size, kernel_size, in_channels, out_channels))
+       fan_in = in_channels * kernel_size * kernel_size
+       self.scale = wscale_gain / np.sqrt(fan_in)
+     else:
+       self.conv = nn.Conv2d(in_channels=in_channels,
+                             out_channels=out_channels,
+                             kernel_size=kernel_size,
+                             stride=stride,
+                             padding=padding,
+                             dilation=dilation,
+                             groups=1,
+                             bias=add_bias)
+
+     self.wscale = WScaleLayer(in_channels=in_channels,
+                               out_channels=out_channels,
+                               kernel_size=kernel_size,
+                               gain=wscale_gain)
+
+     if activation_type == 'linear':
+       self.activate = nn.Identity()
+     elif activation_type == 'lrelu':
+       self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
+     elif activation_type == 'tanh':
+       self.activate = nn.Hardtanh()
+     else:
+       raise NotImplementedError(f'Not implemented activation function: '
+                                 f'{activation_type}!')
+
+   def forward(self, x):
+     x = self.pixel_norm(x)
+     x = self.upsample(x)
+     if hasattr(self, 'conv'):
+       x = self.conv(x)
+     else:
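+       # Fused upsample + conv: zero-pad the (kh, kw, in, out) kernel and sum
+       # its four one-pixel shifts so that a stride-2 conv_transpose2d matches
+       # nearest-neighbor upsampling followed by the convolution.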
+       kernel = self.weight * self.scale
+       kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0)
+       kernel = (kernel[1:, 1:] + kernel[:-1, 1:] +
+                 kernel[1:, :-1] + kernel[:-1, :-1])
+       kernel = kernel.permute(2, 3, 0, 1)
+       x = F.conv_transpose2d(x, kernel, stride=2, padding=1)
+       x = x / self.scale
+     x = self.wscale(x)
+     x = self.activate(x)
+     return x
models/pggan_tf_official/LICENSE.txt ADDED
@@ -0,0 +1,410 @@
+ Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+
+
+ Attribution-NonCommercial 4.0 International
+
+ =======================================================================
+
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
+ does not provide legal services or legal advice. Distribution of
+ Creative Commons public licenses does not create a lawyer-client or
+ other relationship. Creative Commons makes its licenses and related
+ information available on an "as-is" basis. Creative Commons gives no
+ warranties regarding its licenses, any material licensed under their
+ terms and conditions, or any related information. Creative Commons
+ disclaims all liability for damages resulting from their use to the
+ fullest extent possible.
+
+ Using Creative Commons Public Licenses
+
+ Creative Commons public licenses provide a standard set of terms and
+ conditions that creators and other rights holders may use to share
+ original works of authorship and other material subject to copyright
+ and certain other rights specified in the public license below. The
+ following considerations are for informational purposes only, are not
+ exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+ =======================================================================
+
+ Creative Commons Attribution-NonCommercial 4.0 International Public
+ License
+
+ By exercising the Licensed Rights (defined below), You accept and agree
+ to be bound by the terms and conditions of this Creative Commons
+ Attribution-NonCommercial 4.0 International Public License ("Public
+ License"). To the extent this Public License may be interpreted as a
+ contract, You are granted the Licensed Rights in consideration of Your
+ acceptance of these terms and conditions, and the Licensor grants You
+ such rights in consideration of benefits the Licensor receives from
+ making the Licensed Material available under these terms and
+ conditions.
+
+
+ Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+ d. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ e. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ f. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ g. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ h. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ i. NonCommercial means not primarily intended for or directed towards
+ commercial advantage or monetary compensation. For purposes of
+ this Public License, the exchange of the Licensed Material for
+ other material subject to Copyright and Similar Rights by digital
+ file-sharing or similar means is NonCommercial provided there is
+ no payment of monetary compensation in connection with the
+ exchange.
+
+ j. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ k. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ l. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+ Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part, for NonCommercial purposes only; and
+
+ b. produce, reproduce, and Share Adapted Material for
+ NonCommercial purposes only.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties, including when
+ the Licensed Material is used other than for NonCommercial
+ purposes.
+
+
+ Section 3 -- License Conditions.
+
+ Your exercise of the Licensed Rights is expressly made subject to the
+ following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ 4. If You Share Adapted Material You produce, the Adapter's
+ License You apply must not prevent recipients of the Adapted
+ Material from complying with this Public License.
+
+
+ Section 4 -- Sui Generis Database Rights.
+
+ Where the Licensed Rights include Sui Generis Database Rights that
+ apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database for NonCommercial purposes
+ only;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material; and
+
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+ For the avoidance of doubt, this Section 4 supplements and does not
+ replace Your obligations under this Public License where the Licensed
+ Rights include other Copyright and Similar Rights.
+
+
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+ Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+ Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+ Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+ =======================================================================
+
+ Creative Commons is not a party to its public
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
+ its public licenses to material it publishes and in those instances
+ will be considered the "Licensor." The text of the Creative Commons
+ public licenses is dedicated to the public domain under the CC0 Public
+ Domain Dedication. Except for the limited purpose of indicating that
+ material is shared under a Creative Commons public license or as
+ otherwise permitted by the Creative Commons policies published at
+ creativecommons.org/policies, Creative Commons does not authorize the
+ use of the trademark "Creative Commons" or any other trademark or logo
+ of Creative Commons without its prior written consent including,
+ without limitation, in connection with any unauthorized modifications
+ to any of its public licenses or any other arrangements,
+ understandings, or agreements concerning use of licensed material. For
+ the avoidance of doubt, this paragraph does not form part of the
+ public licenses.
+
+ Creative Commons may be contacted at creativecommons.org.
models/pggan_tf_official/README.md ADDED
@@ -0,0 +1,174 @@
+ ## Progressive Growing of GANs for Improved Quality, Stability, and Variation<br><i>– Official TensorFlow implementation of the ICLR 2018 paper</i>
+
+ **Tero Karras** (NVIDIA), **Timo Aila** (NVIDIA), **Samuli Laine** (NVIDIA), **Jaakko Lehtinen** (NVIDIA and Aalto University)
+
+ * For business inquiries, please contact **[researchinquiries@nvidia.com](mailto:researchinquiries@nvidia.com)**
+ * For press and other inquiries, please contact Hector Marinez at **[hmarinez@nvidia.com](mailto:hmarinez@nvidia.com)**
+
+ ![Representative image](https://raw.githubusercontent.com/tkarras/progressive_growing_of_gans/master/representative_image_512x256.png)<br>
+ **Picture:** Two imaginary celebrities that were dreamed up by a random number generator.
+
+ **Abstract:**<br>
+ *We describe a new training methodology for generative adversarial networks. The key idea is to grow both the generator and discriminator progressively: starting from a low resolution, we add new layers that model increasingly fine details as training progresses. This both speeds the training up and greatly stabilizes it, allowing us to produce images of unprecedented quality, e.g., CelebA images at 1024². We also propose a simple way to increase the variation in generated images, and achieve a record inception score of 8.80 in unsupervised CIFAR10. Additionally, we describe several implementation details that are important for discouraging unhealthy competition between the generator and discriminator. Finally, we suggest a new metric for evaluating GAN results, both in terms of image quality and variation. As an additional contribution, we construct a higher-quality version of the CelebA dataset.*
+
+ ## Resources
+
+ * [Paper (NVIDIA research)](http://research.nvidia.com/publication/2017-10_Progressive-Growing-of)
+ * [Paper (arXiv)](http://arxiv.org/abs/1710.10196)
+ * [Result video (YouTube)](https://youtu.be/G06dEcZ-QTg)
+ * [Additional material (Google Drive)](https://drive.google.com/open?id=0B4qLcYyJmiz0NHFULTdYc05lX0U)
+ * [ICLR 2018 poster (`karras2018iclr-poster.pdf`)](https://drive.google.com/open?id=1ilUVoIejsvG04G0PzFNVn3U3TjSSyHGu)
+ * [ICLR 2018 slides (`karras2018iclr-slides.pptx`)](https://drive.google.com/open?id=1jYlrX4DgTs2VAfRcyl3pcNI4ONkBg3-g)
+ * [Representative images (`images/representative-images`)](https://drive.google.com/open?id=0B4qLcYyJmiz0UE9zVHduWFVORlk)
+ * [High-quality video clips (`videos/high-quality-video-clips`)](https://drive.google.com/open?id=1gQu3O8ZhC-nko8wLFgcNqcwMnRYL_z85)
+ * [Huge collection of non-curated images for each dataset (`images/100k-generated-images`)](https://drive.google.com/open?id=1j6uZ_a6zci0HyKZdpDq9kSa8VihtEPCp)
+ * [Extensive video of random interpolations for each dataset (`videos/one-hour-of-random-interpolations`)](https://drive.google.com/open?id=1gAb3oqpaQFHZTwPUXHPIfBIP8eIeWNrI)
+ * [Pre-trained networks (`networks/tensorflow-version`)](https://drive.google.com/open?id=15hvzxt_XxuokSmj0uO4xxMTMWVc0cIMU)
+ * [Minimal example script for importing the pre-trained networks (`networks/tensorflow-version/example_import_script`)](https://drive.google.com/open?id=1A79qKDTFp6pExe4gTSgBsEOkxwa2oes_)
+ * [Data files needed to reconstruct the CelebA-HQ dataset (`datasets/celeba-hq-deltas`)](https://drive.google.com/open?id=0B4qLcYyJmiz0TXY1NG02bzZVRGs)
+ * [Example training logs and progress snapshots (`networks/tensorflow-version/example_training_runs`)](https://drive.google.com/open?id=1A9SKoQ7Xu2fqK22GHdMw8LZTh6qLvR7H)
+
+ All the material, including source code, is made freely available for non-commercial use under the Creative Commons [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/legalcode) license. Feel free to use any of the material in your own work, as long as you give us appropriate credit by mentioning the title and author list of our paper.
+
+ ## Versions
+
+ There are two different versions of the source code. The *TensorFlow version* is newer and more polished, and we generally recommend it as a starting point if you are looking to experiment with our technique, build upon it, or apply it to novel datasets. The *original Theano version*, on the other hand, is what we used to produce all the results shown in our paper. We recommend using it if – and only if – you are looking to reproduce our exact results for benchmark datasets like CIFAR-10, MNIST-RGB, and CelebA.
+
+ The main differences are summarized in the following table:
+
+ | Feature | TensorFlow version | Original Theano version |
+ | :-------------------------------- | :-------------------------------------------: | :-----------------------: |
+ | Branch | [master](https://github.com/tkarras/progressive_growing_of_gans/tree/master) (this branch) | [original-theano-version](https://github.com/tkarras/progressive_growing_of_gans/tree/original-theano-version) |
+ | Multi-GPU support | Yes | No |
+ | FP16 mixed-precision support | Yes | No |
+ | Performance | High | Low |
+ | Training time for CelebA-HQ | 2 days (8 GPUs)<br>2 weeks (1 GPU) | 1–2 months |
+ | Repro CelebA-HQ results | Yes – very close | Yes – identical |
+ | Repro LSUN results | Yes – very close | Yes – identical |
+ | Repro CIFAR-10 results | No | Yes – identical |
+ | Repro MNIST mode recovery | No | Yes – identical |
+ | Repro ablation study (Table 1) | No | Yes – identical |
+ | Dataset format | TFRecords | HDF5 |
+ | Backwards compatibility | Can import networks<br>trained with Theano | N/A |
+ | Code quality | Reasonable | Somewhat messy |
+ | Code status | In active use | No longer maintained |
+
+ ## System requirements
+
+ * Both Linux and Windows are supported, but we strongly recommend Linux for performance and compatibility reasons.
+ * 64-bit Python 3.6 installation with numpy 1.13.3 or newer. We recommend Anaconda3.
+ * One or more high-end NVIDIA Pascal or Volta GPUs with 16GB of DRAM. We recommend NVIDIA DGX-1 with 8 Tesla V100 GPUs.
+ * NVIDIA driver 391.25 or newer, CUDA toolkit 9.0 or newer, cuDNN 7.1.2 or newer.
+ * Additional Python packages listed in `requirements-pip.txt`
+
+ ## Importing and using pre-trained networks
+
+ All pre-trained networks found on Google Drive, as well as ones produced by the training script, are stored as Python PKL files. They can be imported using the standard `pickle` mechanism as long as two conditions are met: (1) The directory containing the Progressive GAN code repository must be included in the PYTHONPATH environment variable, and (2) a `tf.Session()` object must have been created beforehand and set as default. Each PKL file contains 3 instances of `tfutil.Network`:
+
+ ```
+ # Import official CelebA-HQ networks.
+ with open('karras2018iclr-celebahq-1024x1024.pkl', 'rb') as file:
+     G, D, Gs = pickle.load(file)
+ # G = Instantaneous snapshot of the generator, mainly useful for resuming a previous training run.
+ # D = Instantaneous snapshot of the discriminator, mainly useful for resuming a previous training run.
+ # Gs = Long-term average of the generator, yielding higher-quality results than the instantaneous snapshot.
+ ```
+
+ It is also possible to import networks that were produced using the Theano implementation, as long as they do not employ any features that are not natively supported by the TensorFlow version (minibatch discrimination, batch normalization, etc.). To enable Theano network import, however, you must use `misc.load_pkl()` in place of `pickle.load()`:
+
+ ```
+ # Import Theano versions of the official CelebA-HQ networks.
+ import misc
+ G, D, Gs = misc.load_pkl('200-celebahq-1024x1024/network-final.pkl')
+ ```
+
+ Once you have imported the networks, you can call `Gs.run()` to produce a set of images for given latent vectors, or `Gs.get_output_for()` to include the generator network in a larger TensorFlow expression. For further details, please consult the example script found on Google Drive; a minimal sketch of the same workflow is shown after the instructions below. Instructions:
+
+ 1. Pull the Progressive GAN code repository and add it to your PYTHONPATH environment variable.
+ 2. Install the required Python packages with `pip install -r requirements-pip.txt`
+ 3. Download [`import_example.py`](https://drive.google.com/open?id=1xZul7DwqqJoe5OCuKHw6fQVeQZNIMSuF) from [`networks/tensorflow-version/example_import_script`](https://drive.google.com/open?id=1A79qKDTFp6pExe4gTSgBsEOkxwa2oes_)
+ 4. Download [`karras2018iclr-celebahq-1024x1024.pkl`](https://drive.google.com/open?id=188K19ucknC6wg1R6jbuPEhTq9zoufOx4) from [`networks/tensorflow-version`](https://drive.google.com/open?id=15hvzxt_XxuokSmj0uO4xxMTMWVc0cIMU) and place it in the same directory as the script.
+ 5. Run the script with `python import_example.py`
+ 6. If everything goes well, the script should generate 10 PNG images (`img0.png` – `img9.png`) that match the ones found in [`networks/tensorflow-version/example_import_script`](https://drive.google.com/open?id=1A79qKDTFp6pExe4gTSgBsEOkxwa2oes_) exactly.
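+
+ For orientation, a minimal sketch of that workflow (the `Gs.run()` and `Gs.input_shapes` calls are the documented `tfutil.Network` API; the official `import_example.py` on Google Drive is the authoritative version):
+
+ ```
+ # Sketch: generate a batch of images with the long-term average generator Gs.
+ import pickle
+ import numpy as np
+ import tensorflow as tf
+ import PIL.Image
+
+ tf.InteractiveSession()  # create a default session beforehand, as required
+
+ with open('karras2018iclr-celebahq-1024x1024.pkl', 'rb') as file:
+     G, D, Gs = pickle.load(file)
+
+ # Draw random latents; Gs.input_shapes[0] is [None, latent_size].
+ latents = np.random.RandomState(1000).randn(10, *Gs.input_shapes[0][1:])
+ labels = np.zeros([latents.shape[0]] + Gs.input_shapes[1][1:])  # unconditional
+
+ # Run the generator; the output is NCHW, roughly in [-1, 1].
+ images = Gs.run(latents, labels)
+ images = np.clip(np.rint((images + 1.0) / 2.0 * 255.0), 0, 255).astype(np.uint8)
+ images = images.transpose(0, 2, 3, 1)  # NCHW -> NHWC
+ for idx in range(images.shape[0]):
+     PIL.Image.fromarray(images[idx], 'RGB').save('img%d.png' % idx)
+ ```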
+
+ ## Preparing datasets for training
+
+ The Progressive GAN code repository contains a command-line tool for recreating bit-exact replicas of the datasets that we used in the paper. The tool also provides various utilities for operating on the datasets:
+
+ ```
+ usage: dataset_tool.py [-h] <command> ...
+
+     display             Display images in dataset.
+     extract             Extract images from dataset.
+     compare             Compare two datasets.
+     create_mnist        Create dataset for MNIST.
+     create_mnistrgb     Create dataset for MNIST-RGB.
+     create_cifar10      Create dataset for CIFAR-10.
+     create_cifar100     Create dataset for CIFAR-100.
+     create_svhn         Create dataset for SVHN.
+     create_lsun         Create dataset for single LSUN category.
+     create_celeba       Create dataset for CelebA.
+     create_celebahq     Create dataset for CelebA-HQ.
+     create_from_images  Create dataset from a directory full of images.
+     create_from_hdf5    Create dataset from legacy HDF5 archive.
+
+ Type "dataset_tool.py <command> -h" for more information.
+ ```
+
+ The datasets are represented by directories containing the same image data in several resolutions to enable efficient streaming. There is a separate `*.tfrecords` file for each resolution, and if the dataset contains labels, they are stored in a separate file as well:
+
+ ```
+ > python dataset_tool.py create_cifar10 datasets/cifar10 ~/downloads/cifar10
+ > ls -la datasets/cifar10
+ drwxr-xr-x  2 user user          7 Feb 21 10:07 .
+ drwxrwxr-x 10 user user         62 Apr  3 15:10 ..
+ -rw-r--r--  1 user user    4900000 Feb 19 13:17 cifar10-r02.tfrecords
+ -rw-r--r--  1 user user   12350000 Feb 19 13:17 cifar10-r03.tfrecords
+ -rw-r--r--  1 user user   41150000 Feb 19 13:17 cifar10-r04.tfrecords
+ -rw-r--r--  1 user user  156350000 Feb 19 13:17 cifar10-r05.tfrecords
+ -rw-r--r--  1 user user    2000080 Feb 19 13:17 cifar10-rxx.labels
+ ```
+
+ The ```create_*``` commands take the standard version of a given dataset as input and produce the corresponding `*.tfrecords` files as output. Additionally, the ```create_celebahq``` command requires a set of data files representing deltas with respect to the original CelebA dataset. These deltas (27.6GB) can be downloaded from [`datasets/celeba-hq-deltas`](https://drive.google.com/open?id=0B4qLcYyJmiz0TXY1NG02bzZVRGs).
+
+ **Note about module versions**: Some of the dataset commands require specific versions of Python modules and system libraries (e.g. pillow, libjpeg), and they will give an error if the versions do not match. Please heed the error messages – there is **no way** to get the commands to work other than installing these specific versions.
+
+ ## Training networks
+
+ Once the necessary datasets are set up, you can proceed to train your own networks. The general procedure is as follows:
+
+ 1. Edit `config.py` to specify the dataset and training configuration by uncommenting/editing specific lines.
+ 2. Run the training script with `python train.py`.
+ 3. The results are written into a newly created subdirectory under `config.result_dir`
+ 4. Wait several days (or weeks) for the training to converge, and analyze the results.
+
+ By default, `config.py` is configured to train a 1024x1024 network for CelebA-HQ using a single GPU. This is expected to take about two weeks even on the highest-end NVIDIA GPUs. The key to enabling faster training is to employ multiple GPUs and/or go for a lower-resolution dataset. To this end, `config.py` contains several examples for commonly used datasets, as well as a set of "configuration presets" for multi-GPU training. All of the presets are expected to yield roughly the same image quality for CelebA-HQ, but their total training time can vary considerably:
+
+ * `preset-v1-1gpu`: Original config that was used to produce the CelebA-HQ and LSUN results shown in the paper. Expected to take about 1 month on NVIDIA Tesla V100.
+ * `preset-v2-1gpu`: Optimized config that converges considerably faster than the original one. Expected to take about 2 weeks on 1xV100.
+ * `preset-v2-2gpus`: Optimized config for 2 GPUs. Takes about 1 week on 2xV100.
+ * `preset-v2-4gpus`: Optimized config for 4 GPUs. Takes about 3 days on 4xV100.
+ * `preset-v2-8gpus`: Optimized config for 8 GPUs. Takes about 2 days on 8xV100.
+
+ For reference, the expected output of each configuration preset for CelebA-HQ can be found in [`networks/tensorflow-version/example_training_runs`](https://drive.google.com/open?id=1A9SKoQ7Xu2fqK22GHdMw8LZTh6qLvR7H)
+
+ Other noteworthy config options:
+
+ * `fp16`: Enable [FP16 mixed-precision training](http://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) to reduce the training times even further. The actual speedup is heavily dependent on GPU architecture and cuDNN version, and it can be expected to increase considerably in the future.
+ * `BENCHMARK`: Quickly iterate through the resolutions to measure the raw training performance.
+ * `BENCHMARK0`: Same as `BENCHMARK`, but only use the highest resolution.
+ * `syn1024rgb`: Synthetic 1024x1024 dataset consisting of just black images. Useful for benchmarking.
+ * `VERBOSE`: Save image and network snapshots very frequently to facilitate debugging.
+ * `GRAPH` and `HIST`: Include additional data in the TensorBoard report.
+
+ ## Analyzing results
+
+ Training results can be analyzed in several ways:
+
+ * **Manual inspection**: The training script saves a snapshot of randomly generated images at regular intervals in `fakes*.png` and reports the overall progress in `log.txt`.
+ * **TensorBoard**: The training script also exports various running statistics in a `*.tfevents` file that can be visualized in TensorBoard with `tensorboard --logdir <result_subdir>`.
+ * **Generating images and videos**: At the end of `config.py`, there are several pre-defined configs to launch utility scripts (`generate_*`); the sketch after this list shows the relevant edit. For example:
+   * Suppose you have an ongoing training run titled `010-pgan-celebahq-preset-v1-1gpu-fp32`, and you want to generate a video of random interpolations for the latest snapshot.
+   * Uncomment the `generate_interpolation_video` line in `config.py`, replace `run_id=10`, and run `python train.py`
+   * The script will automatically locate the latest network snapshot and create a new result directory containing a single MP4 file.
+ * **Quality metrics**: Similar to the previous example, `config.py` also contains pre-defined configs to compute various quality metrics (Sliced Wasserstein distance, Fréchet inception distance, etc.) for an existing training run. The metrics are computed for each network snapshot in succession and stored in `metric-*.txt` in the original result directory.
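+
+ For instance, the interpolation-video case above boils down to enabling one utility line near the end of `config.py` (sketch of that edit; `run_id` must point at your own training run):
+
+ ```
+ train = EasyDict(func='util_scripts.generate_interpolation_video', run_id=10,
+                  grid_size=[1,1], duration_sec=60.0, smoothing_sec=1.0)
+ num_gpus = 1; desc = 'interpolation-video-' + str(train.run_id)
+ ```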
models/pggan_tf_official/config.py ADDED
@@ -0,0 +1,140 @@
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
+ # 4.0 International License. To view a copy of this license, visit
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
+
+ #----------------------------------------------------------------------------
+ # Convenience class that behaves exactly like dict(), but allows accessing
+ # the keys and values using the attribute syntax, i.e., "mydict.key = value".
+
+ class EasyDict(dict):
+     def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
+     def __getattr__(self, name): return self[name]
+     def __setattr__(self, name, value): self[name] = value
+     def __delattr__(self, name): del self[name]
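+
+ # Example: attribute access and item access are interchangeable, which is
+ # what the configuration one-liners below rely on:
+ #   sched = EasyDict(); sched.minibatch_base = 4
+ #   assert sched['minibatch_base'] == sched.minibatch_base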
+
+ #----------------------------------------------------------------------------
+ # Paths.
+
+ data_dir = 'datasets'
+ result_dir = 'results'
+
+ #----------------------------------------------------------------------------
+ # TensorFlow options.
+
+ tf_config = EasyDict()  # TensorFlow session config, set by tfutil.init_tf().
+ env = EasyDict()        # Environment variables, set by the main program in train.py.
+
+ tf_config['graph_options.place_pruned_graph'] = True  # False (default) = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
+ #tf_config['gpu_options.allow_growth'] = False        # False (default) = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
+ #env.CUDA_VISIBLE_DEVICES = '0'                       # Unspecified (default) = Use all available GPUs. List of ints = CUDA device numbers to use.
+ env.TF_CPP_MIN_LOG_LEVEL = '1'                        # 0 (default) = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
+
+ #----------------------------------------------------------------------------
+ # Official training configs, targeted mainly for CelebA-HQ.
+ # To run, comment/uncomment the lines as appropriate and launch train.py.
+
+ desc        = 'pgan'                                        # Description string included in result subdir name.
+ random_seed = 1000                                          # Global random seed.
+ dataset     = EasyDict()                                    # Options for dataset.load_dataset().
+ train       = EasyDict(func='train.train_progressive_gan')  # Options for main training func.
+ G           = EasyDict(func='networks.G_paper')             # Options for generator network.
+ D           = EasyDict(func='networks.D_paper')             # Options for discriminator network.
+ G_opt       = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
+ D_opt       = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
+ G_loss      = EasyDict(func='loss.G_wgan_acgan')            # Options for generator loss.
+ D_loss      = EasyDict(func='loss.D_wgangp_acgan')          # Options for discriminator loss.
+ sched       = EasyDict()                                    # Options for train.TrainingSchedule.
+ grid        = EasyDict(size='1080p', layout='random')       # Options for train.setup_snapshot_image_grid().
+
+ # Dataset (choose one).
+ desc += '-celebahq';            dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
+ #desc += '-celeba';             dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True
+ #desc += '-cifar10';            dataset = EasyDict(tfrecord_dir='cifar10')
+ #desc += '-cifar100';           dataset = EasyDict(tfrecord_dir='cifar100')
+ #desc += '-svhn';               dataset = EasyDict(tfrecord_dir='svhn')
+ #desc += '-mnist';              dataset = EasyDict(tfrecord_dir='mnist')
+ #desc += '-mnistrgb';           dataset = EasyDict(tfrecord_dir='mnistrgb')
+ #desc += '-syn1024rgb';         dataset = EasyDict(class_name='dataset.SyntheticDataset', resolution=1024, num_channels=3)
+ #desc += '-lsun-airplane';      dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True
+ #desc += '-lsun-bedroom';       dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True
+ #desc += '-lsun-bicycle';       dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True
+ #desc += '-lsun-bird';          dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True
+ #desc += '-lsun-boat';          dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True
+ #desc += '-lsun-bottle';        dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True
+ #desc += '-lsun-bridge';        dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True
+ #desc += '-lsun-bus';           dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True
+ #desc += '-lsun-car';           dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True
+ #desc += '-lsun-cat';           dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True
+ #desc += '-lsun-chair';         dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True
+ #desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True
+ #desc += '-lsun-classroom';     dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True
+ #desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True
+ #desc += '-lsun-cow';           dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True
+ #desc += '-lsun-diningroom';    dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True
+ #desc += '-lsun-diningtable';   dataset = EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True
+ #desc += '-lsun-dog';           dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True
+ #desc += '-lsun-horse';         dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True
+ #desc += '-lsun-kitchen';       dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True
+ #desc += '-lsun-livingroom';    dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True
+ #desc += '-lsun-motorbike';     dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True
+ #desc += '-lsun-person';        dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True
+ #desc += '-lsun-pottedplant';   dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True
+ #desc += '-lsun-restaurant';    dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True
+ #desc += '-lsun-sheep';         dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True
+ #desc += '-lsun-sofa';          dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = True
+ #desc += '-lsun-tower';         dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True
+ #desc += '-lsun-train';         dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True
+ #desc += '-lsun-tvmonitor';     dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True
+
+ # Conditioning & snapshot options.
+ #desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label
+ #desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label
+ #desc += '-g4k'; grid.size = '4k'
+ #desc += '-grpc'; grid.layout = 'row_per_class'
+
+ # Config presets (choose one).
+ #desc += '-preset-v1-1gpu'; num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000
+ desc += '-preset-v2-1gpu'; num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
+ #desc += '-preset-v2-2gpus'; num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
+ #desc += '-preset-v2-4gpus'; num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
+ #desc += '-preset-v2-8gpus'; num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
+
+ # Numerical precision (choose one).
+ desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4}
+ #desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8}
+
+ # Disable individual features.
+ #desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000
+ #desc += '-nopixelnorm'; G.use_pixelnorm = False
+ #desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False
+ #desc += '-noleakyrelu'; G.use_leakyrelu = False
+ #desc += '-nosmoothing'; train.G_smoothing = 0.0
+ #desc += '-norepeat'; train.minibatch_repeats = 1
+ #desc += '-noreset'; train.reset_opt_for_new_lod = False
+
+ # Special modes.
+ #desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
+ #desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
+ #desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100
+ #desc += '-GRAPH'; train.save_tf_graph = True
+ #desc += '-HIST'; train.save_weight_histograms = True
+
+ #----------------------------------------------------------------------------
+ # Utility scripts.
+ # To run, uncomment the appropriate line and launch train.py.
+
+ #train = EasyDict(func='util_scripts.generate_fake_images', run_id=23, num_pngs=1000); num_gpus = 1; desc = 'fake-images-' + str(train.run_id)
+ #train = EasyDict(func='util_scripts.generate_fake_images', run_id=23, grid_size=[15,8], num_pngs=10, image_shrink=4); num_gpus = 1; desc = 'fake-grids-' + str(train.run_id)
+ #train = EasyDict(func='util_scripts.generate_interpolation_video', run_id=23, grid_size=[1,1], duration_sec=60.0, smoothing_sec=1.0); num_gpus = 1; desc = 'interpolation-video-' + str(train.run_id)
+ #train = EasyDict(func='util_scripts.generate_training_video', run_id=23, duration_sec=20.0); num_gpus = 1; desc = 'training-video-' + str(train.run_id)
+
+ #train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-swd-16k.txt', metrics=['swd'], num_images=16384, real_passes=2); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
+ #train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-fid-10k.txt', metrics=['fid'], num_images=10000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
+ #train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-fid-50k.txt', metrics=['fid'], num_images=50000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
+ #train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-is-50k.txt', metrics=['is'], num_images=50000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
+ #train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-msssim-20k.txt', metrics=['msssim'], num_images=20000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
+
+ #----------------------------------------------------------------------------
models/pggan_tf_official/dataset.py ADDED
@@ -0,0 +1,241 @@
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
+ # 4.0 International License. To view a copy of this license, visit
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
+
+ import os
+ import glob
+ import numpy as np
+ import tensorflow as tf
+ import tfutil
+
+ #----------------------------------------------------------------------------
+ # Parse individual image from a tfrecords file.
+
+ def parse_tfrecord_tf(record):
+     features = tf.parse_single_example(record, features={
+         'shape': tf.FixedLenFeature([3], tf.int64),
+         'data': tf.FixedLenFeature([], tf.string)})
+     data = tf.decode_raw(features['data'], tf.uint8)
+     return tf.reshape(data, features['shape'])
+
+ def parse_tfrecord_np(record):
+     ex = tf.train.Example()
+     ex.ParseFromString(record)
+     shape = ex.features.feature['shape'].int64_list.value
+     data = ex.features.feature['data'].bytes_list.value[0]
+     return np.fromstring(data, np.uint8).reshape(shape)
+
+ #----------------------------------------------------------------------------
+ # Dataset class that loads data from tfrecords files.
+
+ class TFRecordDataset:
+     def __init__(self,
+         tfrecord_dir,              # Directory containing a collection of tfrecords files.
+         resolution     = None,     # Dataset resolution, None = autodetect.
+         label_file     = None,     # Relative path of the labels file, None = autodetect.
+         max_label_size = 0,        # 0 = no labels, 'full' = full labels, <int> = N first label components.
+         repeat         = True,     # Repeat dataset indefinitely.
+         shuffle_mb     = 4096,     # Shuffle data within specified window (megabytes), 0 = disable shuffling.
+         prefetch_mb    = 2048,     # Amount of data to prefetch (megabytes), 0 = disable prefetching.
+         buffer_mb      = 256,      # Read buffer size (megabytes).
+         num_threads    = 2):       # Number of concurrent threads.
+
+         self.tfrecord_dir       = tfrecord_dir
+         self.resolution         = None
+         self.resolution_log2    = None
+         self.shape              = []        # [channel, height, width]
+         self.dtype              = 'uint8'
+         self.dynamic_range      = [0, 255]
+         self.label_file         = label_file
+         self.label_size         = None      # [component]
+         self.label_dtype        = None
+         self._np_labels         = None
+         self._tf_minibatch_in   = None
+         self._tf_labels_var     = None
+         self._tf_labels_dataset = None
+         self._tf_datasets       = dict()
+         self._tf_iterator       = None
+         self._tf_init_ops       = dict()
+         self._tf_minibatch_np   = None
+         self._cur_minibatch     = -1
+         self._cur_lod           = -1
+
+         # List tfrecords files and inspect their shapes.
+         assert os.path.isdir(self.tfrecord_dir)
+         tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords')))
+         assert len(tfr_files) >= 1
+         tfr_shapes = []
+         for tfr_file in tfr_files:
+             tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
+             for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt):
+                 tfr_shapes.append(parse_tfrecord_np(record).shape)
+                 break
+
+         # Autodetect label filename.
+         if self.label_file is None:
+             guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels')))
+             if len(guess):
+                 self.label_file = guess[0]
+         elif not os.path.isfile(self.label_file):
+             guess = os.path.join(self.tfrecord_dir, self.label_file)
+             if os.path.isfile(guess):
+                 self.label_file = guess
+
+         # Determine shape and resolution.
+         max_shape = max(tfr_shapes, key=lambda shape: np.prod(shape))
+         self.resolution = resolution if resolution is not None else max_shape[1]
+         self.resolution_log2 = int(np.log2(self.resolution))
+         self.shape = [max_shape[0], self.resolution, self.resolution]
+ tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]
93
+ assert all(shape[0] == max_shape[0] for shape in tfr_shapes)
94
+ assert all(shape[1] == shape[2] for shape in tfr_shapes)
95
+ assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))
96
+ assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))
97
+
98
+ # Load labels.
99
+ assert max_label_size == 'full' or max_label_size >= 0
100
+ self._np_labels = np.zeros([1<<20, 0], dtype=np.float32)
101
+ if self.label_file is not None and max_label_size != 0:
102
+ self._np_labels = np.load(self.label_file)
103
+ assert self._np_labels.ndim == 2
104
+ if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:
105
+ self._np_labels = self._np_labels[:, :max_label_size]
106
+ self.label_size = self._np_labels.shape[1]
107
+ self.label_dtype = self._np_labels.dtype.name
108
+
109
+ # Build TF expressions.
110
+ with tf.name_scope('Dataset'), tf.device('/cpu:0'):
111
+ self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])
112
+ tf_labels_init = tf.zeros(self._np_labels.shape, self._np_labels.dtype)
113
+ self._tf_labels_var = tf.Variable(tf_labels_init, name='labels_var')
114
+ tfutil.set_vars({self._tf_labels_var: self._np_labels})
115
+ self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)
116
+ for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):
117
+ if tfr_lod < 0:
118
+ continue
119
+ dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)
120
+ dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads)
121
+ dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))
122
+ bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize
123
+ if shuffle_mb > 0:
124
+ dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1)
125
+ if repeat:
126
+ dset = dset.repeat()
127
+ if prefetch_mb > 0:
128
+ dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1)
129
+ dset = dset.batch(self._tf_minibatch_in)
130
+ self._tf_datasets[tfr_lod] = dset
131
+ self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)
132
+ self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}
133
+
134
+ # Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().
135
+ def configure(self, minibatch_size, lod=0):
136
+ lod = int(np.floor(lod))
137
+ assert minibatch_size >= 1 and lod in self._tf_datasets
138
+ if self._cur_minibatch != minibatch_size or self._cur_lod != lod:
139
+ self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
140
+ self._cur_minibatch = minibatch_size
141
+ self._cur_lod = lod
142
+
143
+ # Get next minibatch as TensorFlow expressions.
144
+ def get_minibatch_tf(self): # => images, labels
145
+ return self._tf_iterator.get_next()
146
+
147
+ # Get next minibatch as NumPy arrays.
148
+ def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
149
+ self.configure(minibatch_size, lod)
150
+ if self._tf_minibatch_np is None:
151
+ self._tf_minibatch_np = self.get_minibatch_tf()
152
+ return tfutil.run(self._tf_minibatch_np)
153
+
154
+ # Get random labels as TensorFlow expression.
155
+ def get_random_labels_tf(self, minibatch_size): # => labels
156
+ if self.label_size > 0:
157
+ return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32))
158
+ else:
159
+ return tf.zeros([minibatch_size, 0], self.label_dtype)
160
+
161
+ # Get random labels as NumPy array.
162
+ def get_random_labels_np(self, minibatch_size): # => labels
163
+ if self.label_size > 0:
164
+ return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])]
165
+ else:
166
+ return np.zeros([minibatch_size, 0], self.label_dtype)
167
+
168
+ #----------------------------------------------------------------------------
169
+ # Base class for datasets that are generated on the fly.
170
+
171
+ class SyntheticDataset:
172
+ def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
173
+ self.resolution = resolution
174
+ self.resolution_log2 = int(np.log2(resolution))
175
+ self.shape = [num_channels, resolution, resolution]
176
+ self.dtype = dtype
177
+ self.dynamic_range = dynamic_range
178
+ self.label_size = label_size
179
+ self.label_dtype = label_dtype
180
+ self._tf_minibatch_var = None
181
+ self._tf_lod_var = None
182
+ self._tf_minibatch_np = None
183
+ self._tf_labels_np = None
184
+
185
+ assert self.resolution == 2 ** self.resolution_log2
186
+ with tf.name_scope('Dataset'):
187
+ self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
188
+ self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
189
+
190
+ def configure(self, minibatch_size, lod=0):
191
+ lod = int(np.floor(lod))
192
+ assert minibatch_size >= 1 and lod >= 0 and lod <= self.resolution_log2
193
+ tfutil.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod})
194
+
195
+ def get_minibatch_tf(self): # => images, labels
196
+ with tf.name_scope('SyntheticDataset'):
197
+ shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32)
198
+ shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink]
199
+ images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape)
200
+ labels = self._generate_labels(self._tf_minibatch_var)
201
+ return images, labels
202
+
203
+ def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
204
+ self.configure(minibatch_size, lod)
205
+ if self._tf_minibatch_np is None:
206
+ self._tf_minibatch_np = self.get_minibatch_tf()
207
+ return tfutil.run(self._tf_minibatch_np)
208
+
209
+ def get_random_labels_tf(self, minibatch_size): # => labels
210
+ with tf.name_scope('SyntheticDataset'):
211
+ return self._generate_labels(minibatch_size)
212
+
213
+ def get_random_labels_np(self, minibatch_size): # => labels
214
+ self.configure(minibatch_size)
215
+ if self._tf_labels_np is None:
216
+ self._tf_labels_np = self.get_random_labels_tf(minibatch_size)
217
+ return tfutil.run(self._tf_labels_np)
218
+
219
+ def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses
220
+ return tf.zeros([minibatch] + shape, self.dtype)
221
+
222
+ def _generate_labels(self, minibatch): # to be overridden by subclasses
223
+ return tf.zeros([minibatch, self.label_size], self.label_dtype)
224
+
225
+ #----------------------------------------------------------------------------
226
+ # Helper func for constructing a dataset object using the given options.
227
+
228
+ def load_dataset(class_name='dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs):
229
+ adjusted_kwargs = dict(kwargs)
230
+ if 'tfrecord_dir' in adjusted_kwargs and data_dir is not None:
231
+ adjusted_kwargs['tfrecord_dir'] = os.path.join(data_dir, adjusted_kwargs['tfrecord_dir'])
232
+ if verbose:
233
+ print('Streaming data using %s...' % class_name)
234
+ dataset = tfutil.import_obj(class_name)(**adjusted_kwargs)
235
+ if verbose:
236
+ print('Dataset shape =', np.int32(dataset.shape).tolist())
237
+ print('Dynamic range =', dataset.dynamic_range)
238
+ print('Label size =', dataset.label_size)
239
+ return dataset
240
+
241
+ #----------------------------------------------------------------------------
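For orientation, a hedged sketch of how this dataset API is typically driven (directory names are placeholders, not part of this commit): initialize TensorFlow through tfutil, build the dataset with load_dataset(), and pull minibatches as NumPy arrays; lod selects which resolution's tfrecords file is read.

import numpy as np
import tfutil
import dataset

tfutil.init_tf({'gpu_options.allow_growth': True})
training_set = dataset.load_dataset(
    data_dir='datasets', tfrecord_dir='celebahq',    # placeholder layout
    max_label_size='full', repeat=False, shuffle_mb=0, verbose=True)
tfutil.init_uninited_vars()                          # initialize remaining TF variables

# lod=0 reads the full-resolution records; lod=1 the half-resolution ones, etc.
images, labels = training_set.get_minibatch_np(minibatch_size=8, lod=0)
print(images.shape, images.dtype)                    # (8, C, res, res), uint8 in [0, 255]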
models/pggan_tf_official/dataset_tool.py ADDED
@@ -0,0 +1,740 @@
1
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
4
+ # 4.0 International License. To view a copy of this license, visit
5
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7
+
8
+ import os
9
+ import sys
10
+ import glob
11
+ import argparse
12
+ import threading
13
+ import six.moves.queue as Queue
14
+ import traceback
15
+ import numpy as np
16
+ import tensorflow as tf
17
+ import PIL.Image
18
+
19
+ import tfutil
20
+ import dataset
21
+
22
+ #----------------------------------------------------------------------------
23
+
24
+ def error(msg):
25
+ print('Error: ' + msg)
26
+ sys.exit(1)
27
+
28
+ #----------------------------------------------------------------------------
29
+
30
+ class TFRecordExporter:
31
+ def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
32
+ self.tfrecord_dir = tfrecord_dir
33
+ self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
34
+ self.expected_images = expected_images
35
+ self.cur_images = 0
36
+ self.shape = None
37
+ self.resolution_log2 = None
38
+ self.tfr_writers = []
39
+ self.print_progress = print_progress
40
+ self.progress_interval = progress_interval
41
+ if self.print_progress:
42
+ print('Creating dataset "%s"' % tfrecord_dir)
43
+ if not os.path.isdir(self.tfrecord_dir):
44
+ os.makedirs(self.tfrecord_dir)
45
+ assert os.path.isdir(self.tfrecord_dir)
46
+
47
+ def close(self):
48
+ if self.print_progress:
49
+ print('%-40s\r' % 'Flushing data...', end='', flush=True)
50
+ for tfr_writer in self.tfr_writers:
51
+ tfr_writer.close()
52
+ self.tfr_writers = []
53
+ if self.print_progress:
54
+ print('%-40s\r' % '', end='', flush=True)
55
+ print('Added %d images.' % self.cur_images)
56
+
57
+ def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
58
+ order = np.arange(self.expected_images)
59
+ np.random.RandomState(123).shuffle(order)
60
+ return order
61
+
62
+ def add_image(self, img):
63
+ if self.print_progress and self.cur_images % self.progress_interval == 0:
64
+ print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
65
+ if self.shape is None:
66
+ self.shape = img.shape
67
+ self.resolution_log2 = int(np.log2(self.shape[1]))
68
+ assert self.shape[0] in [1, 3]
69
+ assert self.shape[1] == self.shape[2]
70
+ assert self.shape[1] == 2**self.resolution_log2
71
+ tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
72
+ for lod in range(self.resolution_log2 - 1):
73
+ tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
74
+ self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
75
+ assert img.shape == self.shape
76
+ for lod, tfr_writer in enumerate(self.tfr_writers):
77
+ if lod:
78
+ img = img.astype(np.float32)
79
+ img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
80
+ quant = np.rint(img).clip(0, 255).astype(np.uint8)
81
+ ex = tf.train.Example(features=tf.train.Features(feature={
82
+ 'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
83
+ 'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tobytes()]))}))
84
+ tfr_writer.write(ex.SerializeToString())
85
+ self.cur_images += 1
86
+
87
+ def add_labels(self, labels):
88
+ if self.print_progress:
89
+ print('%-40s\r' % 'Saving labels...', end='', flush=True)
90
+ assert labels.shape[0] == self.cur_images
91
+ with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
92
+ np.save(f, labels.astype(np.float32))
93
+
94
+ def __enter__(self):
95
+ return self
96
+
97
+ def __exit__(self, *args):
98
+ self.close()
99
+
100
+ #----------------------------------------------------------------------------
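A hedged sketch of driving TFRecordExporter directly on made-up data: images must be square, power-of-two, CHW uint8 with 1 or 3 channels, and labels must be added in the exporter's shuffled order.

import numpy as np

rnd = np.random.RandomState(0)
images = rnd.randint(0, 256, size=(16, 1, 32, 32), dtype=np.uint8)  # illustrative images
onehot = np.eye(4, dtype=np.float32)[np.arange(16) % 4]             # illustrative labels

with TFRecordExporter('datasets/toy', images.shape[0]) as tfr:      # placeholder directory
    order = tfr.choose_shuffled_order()
    for idx in range(order.size):
        tfr.add_image(images[order[idx]])       # one record per lod, box-filtered downscales
    tfr.add_labels(onehot[order])               # np.save'd to the '-rxx.labels' file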
101
+
102
+ class ExceptionInfo(object):
103
+ def __init__(self):
104
+ self.value = sys.exc_info()[1]
105
+ self.traceback = traceback.format_exc()
106
+
107
+ #----------------------------------------------------------------------------
108
+
109
+ class WorkerThread(threading.Thread):
110
+ def __init__(self, task_queue):
111
+ threading.Thread.__init__(self)
112
+ self.task_queue = task_queue
113
+
114
+ def run(self):
115
+ while True:
116
+ func, args, result_queue = self.task_queue.get()
117
+ if func is None:
118
+ break
119
+ try:
120
+ result = func(*args)
121
+ except:
122
+ result = ExceptionInfo()
123
+ result_queue.put((result, args))
124
+
125
+ #----------------------------------------------------------------------------
126
+
127
+ class ThreadPool(object):
128
+ def __init__(self, num_threads):
129
+ assert num_threads >= 1
130
+ self.task_queue = Queue.Queue()
131
+ self.result_queues = dict()
132
+ self.num_threads = num_threads
133
+ for idx in range(self.num_threads):
134
+ thread = WorkerThread(self.task_queue)
135
+ thread.daemon = True
136
+ thread.start()
137
+
138
+ def add_task(self, func, args=()):
139
+ assert hasattr(func, '__call__') # must be a function
140
+ if func not in self.result_queues:
141
+ self.result_queues[func] = Queue.Queue()
142
+ self.task_queue.put((func, args, self.result_queues[func]))
143
+
144
+ def get_result(self, func): # returns (result, args)
145
+ result, args = self.result_queues[func].get()
146
+ if isinstance(result, ExceptionInfo):
147
+ print('\n\nWorker thread caught an exception:\n' + result.traceback)
148
+ raise result.value
149
+ return result, args
150
+
151
+ def finish(self):
152
+ for idx in range(self.num_threads):
153
+ self.task_queue.put((None, (), None))
154
+
155
+ def __enter__(self): # for 'with' statement
156
+ return self
157
+
158
+ def __exit__(self, *excinfo):
159
+ self.finish()
160
+
161
+ def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None):
162
+ if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4
163
+ assert max_items_in_flight >= 1
164
+ results = []
165
+ retire_idx = [0]
166
+
167
+ def task_func(prepared, idx):
168
+ return process_func(prepared)
169
+
170
+ def retire_result():
171
+ processed, (prepared, idx) = self.get_result(task_func)
172
+ results[idx] = processed
173
+ while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:
174
+ yield post_func(results[retire_idx[0]])
175
+ results[retire_idx[0]] = None
176
+ retire_idx[0] += 1
177
+
178
+ for idx, item in enumerate(item_iterator):
179
+ prepared = pre_func(item)
180
+ results.append(None)
181
+ self.add_task(func=task_func, args=(prepared, idx))
182
+ while retire_idx[0] < idx - max_items_in_flight + 2:
183
+ for res in retire_result(): yield res
184
+ while retire_idx[0] < len(results):
185
+ for res in retire_result(): yield res
186
+
187
+ #----------------------------------------------------------------------------
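A small self-contained sketch of the pool above: process_items_concurrently yields results in input order while keeping up to max_items_in_flight items on worker threads (the work function is a stand-in for, e.g., image decoding).

def square(x):          # stand-in for per-item work
    return x * x

with ThreadPool(num_threads=4) as pool:
    for result in pool.process_items_concurrently(range(10), process_func=square):
        print(result)   # 0, 1, 4, ..., 81 -- input order is preserved despite concurrency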
188
+
189
+ def display(tfrecord_dir):
190
+ print('Loading dataset "%s"' % tfrecord_dir)
191
+ tfutil.init_tf({'gpu_options.allow_growth': True})
192
+ dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
193
+ tfutil.init_uninited_vars()
194
+
195
+ idx = 0
196
+ while True:
197
+ try:
198
+ images, labels = dset.get_minibatch_np(1)
199
+ except tf.errors.OutOfRangeError:
200
+ break
201
+ if idx == 0:
202
+ print('Displaying images')
203
+ import cv2 # pip install opencv-python
204
+ cv2.namedWindow('dataset_tool')
205
+ print('Press SPACE or ENTER to advance, ESC to exit')
206
+ print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist()))
207
+ cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR
208
+ idx += 1
209
+ if cv2.waitKey() == 27:
210
+ break
211
+ print('\nDisplayed %d images.' % idx)
212
+
213
+ #----------------------------------------------------------------------------
214
+
215
+ def extract(tfrecord_dir, output_dir):
216
+ print('Loading dataset "%s"' % tfrecord_dir)
217
+ tfutil.init_tf({'gpu_options.allow_growth': True})
218
+ dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
219
+ tfutil.init_uninited_vars()
220
+
221
+ print('Extracting images to "%s"' % output_dir)
222
+ if not os.path.isdir(output_dir):
223
+ os.makedirs(output_dir)
224
+ idx = 0
225
+ while True:
226
+ if idx % 10 == 0:
227
+ print('%d\r' % idx, end='', flush=True)
228
+ try:
229
+ images, labels = dset.get_minibatch_np(1)
230
+ except tf.errors.OutOfRangeError:
231
+ break
232
+ if images.shape[1] == 1:
233
+ img = PIL.Image.fromarray(images[0][0], 'L')
234
+ else:
235
+ img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
236
+ img.save(os.path.join(output_dir, 'img%08d.png' % idx))
237
+ idx += 1
238
+ print('Extracted %d images.' % idx)
239
+
240
+ #----------------------------------------------------------------------------
241
+
242
+ def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):
243
+ max_label_size = 0 if ignore_labels else 'full'
244
+ print('Loading dataset "%s"' % tfrecord_dir_a)
245
+ tfutil.init_tf({'gpu_options.allow_growth': True})
246
+ dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
247
+ print('Loading dataset "%s"' % tfrecord_dir_b)
248
+ dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
249
+ tfutil.init_uninited_vars()
250
+
251
+ print('Comparing datasets')
252
+ idx = 0
253
+ identical_images = 0
254
+ identical_labels = 0
255
+ while True:
256
+ if idx % 100 == 0:
257
+ print('%d\r' % idx, end='', flush=True)
258
+ try:
259
+ images_a, labels_a = dset_a.get_minibatch_np(1)
260
+ except tf.errors.OutOfRangeError:
261
+ images_a, labels_a = None, None
262
+ try:
263
+ images_b, labels_b = dset_b.get_minibatch_np(1)
264
+ except tf.errors.OutOfRangeError:
265
+ images_b, labels_b = None, None
266
+ if images_a is None or images_b is None:
267
+ if images_a is not None or images_b is not None:
268
+ print('Datasets contain different number of images')
269
+ break
270
+ if images_a.shape == images_b.shape and np.all(images_a == images_b):
271
+ identical_images += 1
272
+ else:
273
+ print('Image %d is different' % idx)
274
+ if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):
275
+ identical_labels += 1
276
+ else:
277
+ print('Label %d is different' % idx)
278
+ idx += 1
279
+ print('Identical images: %d / %d' % (identical_images, idx))
280
+ if not ignore_labels:
281
+ print('Identical labels: %d / %d' % (identical_labels, idx))
282
+
283
+ #----------------------------------------------------------------------------
284
+
285
+ def create_mnist(tfrecord_dir, mnist_dir):
286
+ print('Loading MNIST from "%s"' % mnist_dir)
287
+ import gzip
288
+ with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
289
+ images = np.frombuffer(file.read(), np.uint8, offset=16)
290
+ with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
291
+ labels = np.frombuffer(file.read(), np.uint8, offset=8)
292
+ images = images.reshape(-1, 1, 28, 28)
293
+ images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
294
+ assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
295
+ assert labels.shape == (60000,) and labels.dtype == np.uint8
296
+ assert np.min(images) == 0 and np.max(images) == 255
297
+ assert np.min(labels) == 0 and np.max(labels) == 9
298
+ onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
299
+ onehot[np.arange(labels.size), labels] = 1.0
300
+
301
+ with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
302
+ order = tfr.choose_shuffled_order()
303
+ for idx in range(order.size):
304
+ tfr.add_image(images[order[idx]])
305
+ tfr.add_labels(onehot[order])
306
+
307
+ #----------------------------------------------------------------------------
308
+
309
+ def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
310
+ print('Loading MNIST from "%s"' % mnist_dir)
311
+ import gzip
312
+ with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
313
+ images = np.frombuffer(file.read(), np.uint8, offset=16)
314
+ images = images.reshape(-1, 28, 28)
315
+ images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
316
+ assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
317
+ assert np.min(images) == 0 and np.max(images) == 255
318
+
319
+ with TFRecordExporter(tfrecord_dir, num_images) as tfr:
320
+ rnd = np.random.RandomState(random_seed)
321
+ for idx in range(num_images):
322
+ tfr.add_image(images[rnd.randint(images.shape[0], size=3)])
323
+
324
+ #----------------------------------------------------------------------------
325
+
326
+ def create_cifar10(tfrecord_dir, cifar10_dir):
327
+ print('Loading CIFAR-10 from "%s"' % cifar10_dir)
328
+ import pickle
329
+ images = []
330
+ labels = []
331
+ for batch in range(1, 6):
332
+ with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:
333
+ data = pickle.load(file, encoding='latin1')
334
+ images.append(data['data'].reshape(-1, 3, 32, 32))
335
+ labels.append(data['labels'])
336
+ images = np.concatenate(images)
337
+ labels = np.concatenate(labels)
338
+ assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
339
+ assert labels.shape == (50000,) and labels.dtype == np.int32
340
+ assert np.min(images) == 0 and np.max(images) == 255
341
+ assert np.min(labels) == 0 and np.max(labels) == 9
342
+ onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
343
+ onehot[np.arange(labels.size), labels] = 1.0
344
+
345
+ with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
346
+ order = tfr.choose_shuffled_order()
347
+ for idx in range(order.size):
348
+ tfr.add_image(images[order[idx]])
349
+ tfr.add_labels(onehot[order])
350
+
351
+ #----------------------------------------------------------------------------
352
+
353
+ def create_cifar100(tfrecord_dir, cifar100_dir):
354
+ print('Loading CIFAR-100 from "%s"' % cifar100_dir)
355
+ import pickle
356
+ with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
357
+ data = pickle.load(file, encoding='latin1')
358
+ images = data['data'].reshape(-1, 3, 32, 32)
359
+ labels = np.array(data['fine_labels'])
360
+ assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
361
+ assert labels.shape == (50000,) and labels.dtype == np.int32
362
+ assert np.min(images) == 0 and np.max(images) == 255
363
+ assert np.min(labels) == 0 and np.max(labels) == 99
364
+ onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
365
+ onehot[np.arange(labels.size), labels] = 1.0
366
+
367
+ with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
368
+ order = tfr.choose_shuffled_order()
369
+ for idx in range(order.size):
370
+ tfr.add_image(images[order[idx]])
371
+ tfr.add_labels(onehot[order])
372
+
373
+ #----------------------------------------------------------------------------
374
+
375
+ def create_svhn(tfrecord_dir, svhn_dir):
376
+ print('Loading SVHN from "%s"' % svhn_dir)
377
+ import pickle
378
+ images = []
379
+ labels = []
380
+ for batch in range(1, 4):
381
+ with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:
382
+ data = pickle.load(file, encoding='latin1')
383
+ images.append(data[0])
384
+ labels.append(data[1])
385
+ images = np.concatenate(images)
386
+ labels = np.concatenate(labels)
387
+ assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
388
+ assert labels.shape == (73257,) and labels.dtype == np.uint8
389
+ assert np.min(images) == 0 and np.max(images) == 255
390
+ assert np.min(labels) == 0 and np.max(labels) == 9
391
+ onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
392
+ onehot[np.arange(labels.size), labels] = 1.0
393
+
394
+ with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
395
+ order = tfr.choose_shuffled_order()
396
+ for idx in range(order.size):
397
+ tfr.add_image(images[order[idx]])
398
+ tfr.add_labels(onehot[order])
399
+
400
+ #----------------------------------------------------------------------------
401
+
402
+ def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None):
403
+ print('Loading LSUN dataset from "%s"' % lmdb_dir)
404
+ import lmdb # pip install lmdb
405
+ import cv2 # pip install opencv-python
406
+ import io
407
+ with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
408
+ total_images = txn.stat()['entries']
409
+ if max_images is None:
410
+ max_images = total_images
411
+ with TFRecordExporter(tfrecord_dir, max_images) as tfr:
412
+ for idx, (key, value) in enumerate(txn.cursor()):
413
+ try:
414
+ try:
415
+ img = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), 1)
416
+ if img is None:
417
+ raise IOError('cv2.imdecode failed')
418
+ img = img[:, :, ::-1] # BGR => RGB
419
+ except IOError:
420
+ img = np.asarray(PIL.Image.open(io.BytesIO(value)))
421
+ crop = np.min(img.shape[:2])
422
+ img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
423
+ img = PIL.Image.fromarray(img, 'RGB')
424
+ img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
425
+ img = np.asarray(img)
426
+ img = img.transpose(2, 0, 1) # HWC => CHW
427
+ tfr.add_image(img)
428
+ except:
429
+ print(sys.exc_info()[1])
430
+ if tfr.cur_images == max_images:
431
+ break
432
+
433
+ #----------------------------------------------------------------------------
434
+
435
+ def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
436
+ print('Loading CelebA from "%s"' % celeba_dir)
437
+ glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
438
+ image_filenames = sorted(glob.glob(glob_pattern))
439
+ expected_images = 202599
440
+ if len(image_filenames) != expected_images:
441
+ error('Expected to find %d images' % expected_images)
442
+
443
+ with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
444
+ order = tfr.choose_shuffled_order()
445
+ for idx in range(order.size):
446
+ img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
447
+ assert img.shape == (218, 178, 3)
448
+ img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
449
+ img = img.transpose(2, 0, 1) # HWC => CHW
450
+ tfr.add_image(img)
451
+
452
+ #----------------------------------------------------------------------------
453
+
454
+ def create_celebahq(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100):
455
+ print('Loading CelebA from "%s"' % celeba_dir)
456
+ expected_images = 202599
457
+ if len(glob.glob(os.path.join(celeba_dir, 'img_celeba', '*.jpg'))) != expected_images:
458
+ error('Expected to find %d images' % expected_images)
459
+ with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file:
460
+ landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]]
461
+ landmarks = np.float32(landmarks).reshape(-1, 5, 2)
462
+
463
+ print('Loading CelebA-HQ deltas from "%s"' % delta_dir)
464
+ import scipy.ndimage
465
+ import hashlib
466
+ import bz2
467
+ import zipfile
468
+ import base64
469
+ import cryptography.hazmat.primitives.hashes
470
+ import cryptography.hazmat.backends
471
+ import cryptography.hazmat.primitives.kdf.pbkdf2
472
+ import cryptography.fernet
473
+ expected_zips = 30
474
+ if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips:
475
+ error('Expected to find %d zips' % expected_zips)
476
+ with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file:
477
+ lines = [line.split() for line in file]
478
+ fields = dict()
479
+ for idx, field in enumerate(lines[0]):
480
+ type = int if field.endswith('idx') else str
481
+ fields[field] = [type(line[idx]) for line in lines[1:]]
482
+ indices = np.array(fields['idx'])
483
+
484
+ # Must use pillow version 3.1.1 for everything to work correctly.
485
+ if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1':
486
+ error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1
487
+
488
+ # Must use libjpeg version 8d for everything to work correctly.
489
+ img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'img_celeba', '000001.jpg')))
490
+ md5 = hashlib.md5()
491
+ md5.update(img.tobytes())
492
+ if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3':
493
+ error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d
494
+
495
+ def rot90(v):
496
+ return np.array([-v[1], v[0]])
497
+
498
+ def process_func(idx):
499
+ # Load original image.
500
+ orig_idx = fields['orig_idx'][idx]
501
+ orig_file = fields['orig_file'][idx]
502
+ orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
503
+ img = PIL.Image.open(orig_path)
504
+
505
+ # Choose oriented crop rectangle.
506
+ lm = landmarks[orig_idx]
507
+ eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
508
+ mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
509
+ eye_to_eye = lm[1] - lm[0]
510
+ eye_to_mouth = mouth_avg - eye_avg
511
+ x = eye_to_eye - rot90(eye_to_mouth)
512
+ x /= np.hypot(*x)
513
+ x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
514
+ y = rot90(x)
515
+ c = eye_avg + eye_to_mouth * 0.1
516
+ quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
517
+ zoom = 1024 / (np.hypot(*x) * 2)
518
+
519
+ # Shrink.
520
+ shrink = int(np.floor(0.5 / zoom))
521
+ if shrink > 1:
522
+ size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))
523
+ img = img.resize(size, PIL.Image.ANTIALIAS)
524
+ quad /= shrink
525
+ zoom *= shrink
526
+
527
+ # Crop.
528
+ border = max(int(np.round(1024 * 0.1 / zoom)), 3)
529
+ crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
530
+ crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
531
+ if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
532
+ img = img.crop(crop)
533
+ quad -= crop[0:2]
534
+
535
+ # Simulate super-resolution.
536
+ superres = int(np.exp2(np.ceil(np.log2(zoom))))
537
+ if superres > 1:
538
+ img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS)
539
+ quad *= superres
540
+ zoom /= superres
541
+
542
+ # Pad.
543
+ pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
544
+ pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
545
+ if max(pad) > border - 4:
546
+ pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
547
+ img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
548
+ h, w, _ = img.shape
549
+ y, x, _ = np.mgrid[:h, :w, :1]
550
+ mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3]))
551
+ blur = 1024 * 0.02 / zoom
552
+ img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
553
+ img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
554
+ img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
555
+ quad += pad[0:2]
556
+
557
+ # Transform.
558
+ img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
559
+ img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)
560
+ img = np.asarray(img).transpose(2, 0, 1)
561
+
562
+ # Verify MD5.
563
+ md5 = hashlib.md5()
564
+ md5.update(img.tobytes())
565
+ assert md5.hexdigest() == fields['proc_md5'][idx]
566
+
567
+ # Load delta image and original JPG.
568
+ with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip:
569
+ delta_bytes = zip.read('delta%05d.dat' % idx)
570
+ with open(orig_path, 'rb') as file:
571
+ orig_bytes = file.read()
572
+
573
+ # Decrypt delta image, using original JPG data as decryption key.
574
+ algorithm = cryptography.hazmat.primitives.hashes.SHA256()
575
+ backend = cryptography.hazmat.backends.default_backend()
576
+ salt = bytes(orig_file, 'ascii')
577
+ kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend)
578
+ key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
579
+ delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024)
580
+
581
+ # Apply delta image.
582
+ img = img + delta
583
+
584
+ # Verify MD5.
585
+ md5 = hashlib.md5()
586
+ md5.update(img.tobytes())
587
+ assert md5.hexdigest() == fields['final_md5'][idx]
588
+ return img
589
+
590
+ with TFRecordExporter(tfrecord_dir, indices.size) as tfr:
591
+ order = tfr.choose_shuffled_order()
592
+ with ThreadPool(num_threads) as pool:
593
+ for img in pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks):
594
+ tfr.add_image(img)
595
+
596
+ #----------------------------------------------------------------------------
597
+
598
+ def create_from_images(tfrecord_dir, image_dir, shuffle):
599
+ print('Loading images from "%s"' % image_dir)
600
+ image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
601
+ if len(image_filenames) == 0:
602
+ error('No input images found')
603
+
604
+ img = np.asarray(PIL.Image.open(image_filenames[0]))
605
+ resolution = img.shape[0]
606
+ channels = img.shape[2] if img.ndim == 3 else 1
607
+ if img.shape[1] != resolution:
608
+ error('Input images must have the same width and height')
609
+ if resolution != 2 ** int(np.floor(np.log2(resolution))):
610
+ error('Input image resolution must be a power-of-two')
611
+ if channels not in [1, 3]:
612
+ error('Input images must be stored as RGB or grayscale')
613
+
614
+ with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
615
+ order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
616
+ for idx in range(order.size):
617
+ img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
618
+ if channels == 1:
619
+ img = img[np.newaxis, :, :] # HW => CHW
620
+ else:
621
+ img = img.transpose(2, 0, 1) # HWC => CHW
622
+ tfr.add_image(img)
623
+
624
+ #----------------------------------------------------------------------------
625
+
626
+ def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle):
627
+ print('Loading HDF5 archive from "%s"' % hdf5_filename)
628
+ import h5py # conda install h5py
629
+ with h5py.File(hdf5_filename, 'r') as hdf5_file:
630
+ hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3])
631
+ with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr:
632
+ order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0])
633
+ for idx in range(order.size):
634
+ tfr.add_image(hdf5_data[order[idx]])
635
+ npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy'
636
+ if os.path.isfile(npy_filename):
637
+ tfr.add_labels(np.load(npy_filename)[order])
638
+
639
+ #----------------------------------------------------------------------------
640
+
641
+ def execute_cmdline(argv):
642
+ prog = argv[0]
643
+ parser = argparse.ArgumentParser(
644
+ prog = prog,
645
+ description = 'Tool for creating, extracting, and visualizing Progressive GAN datasets.',
646
+ epilog = 'Type "%s <command> -h" for more information.' % prog)
647
+
648
+ subparsers = parser.add_subparsers(dest='command')
649
+ subparsers.required = True
650
+ def add_command(cmd, desc, example=None):
651
+ epilog = 'Example: %s %s' % (prog, example) if example is not None else None
652
+ return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)
653
+
654
+ p = add_command( 'display', 'Display images in dataset.',
655
+ 'display datasets/mnist')
656
+ p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
657
+
658
+ p = add_command( 'extract', 'Extract images from dataset.',
659
+ 'extract datasets/mnist mnist-images')
660
+ p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
661
+ p.add_argument( 'output_dir', help='Directory to extract the images into')
662
+
663
+ p = add_command( 'compare', 'Compare two datasets.',
664
+ 'compare datasets/mydataset datasets/mnist')
665
+ p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset')
666
+ p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset')
667
+ p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0)
668
+
669
+ p = add_command( 'create_mnist', 'Create dataset for MNIST.',
670
+ 'create_mnist datasets/mnist ~/downloads/mnist')
671
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
672
+ p.add_argument( 'mnist_dir', help='Directory containing MNIST')
673
+
674
+ p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.',
675
+ 'create_mnistrgb datasets/mnistrgb ~/downloads/mnist')
676
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
677
+ p.add_argument( 'mnist_dir', help='Directory containing MNIST')
678
+ p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000)
679
+ p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123)
680
+
681
+ p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.',
682
+ 'create_cifar10 datasets/cifar10 ~/downloads/cifar10')
683
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
684
+ p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10')
685
+
686
+ p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.',
687
+ 'create_cifar100 datasets/cifar100 ~/downloads/cifar100')
688
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
689
+ p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100')
690
+
691
+ p = add_command( 'create_svhn', 'Create dataset for SVHN.',
692
+ 'create_svhn datasets/svhn ~/downloads/svhn')
693
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
694
+ p.add_argument( 'svhn_dir', help='Directory containing SVHN')
695
+
696
+ p = add_command( 'create_lsun', 'Create dataset for single LSUN category.',
697
+ 'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000')
698
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
699
+ p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')
700
+ p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256)
701
+ p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)
702
+
703
+ p = add_command( 'create_celeba', 'Create dataset for CelebA.',
704
+ 'create_celeba datasets/celeba ~/downloads/celeba')
705
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
706
+ p.add_argument( 'celeba_dir', help='Directory containing CelebA')
707
+ p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89)
708
+ p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121)
709
+
710
+ p = add_command( 'create_celebahq', 'Create dataset for CelebA-HQ.',
711
+ 'create_celebahq datasets/celebahq ~/downloads/celeba ~/downloads/celeba-hq-deltas')
712
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
713
+ p.add_argument( 'celeba_dir', help='Directory containing CelebA')
714
+ p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas')
715
+ p.add_argument( '--num_threads', help='Number of concurrent threads (default: 4)', type=int, default=4)
716
+ p.add_argument( '--num_tasks', help='Number of concurrent processing tasks (default: 100)', type=int, default=100)
717
+
718
+ p = add_command( 'create_from_images', 'Create dataset from a directory full of images.',
719
+ 'create_from_images datasets/mydataset myimagedir')
720
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
721
+ p.add_argument( 'image_dir', help='Directory containing the images')
722
+ p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
723
+
724
+ p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.',
725
+ 'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5')
726
+ p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
727
+ p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images')
728
+ p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
729
+
730
+ args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
731
+ func = globals()[args.command]
732
+ del args.command
733
+ func(**vars(args))
734
+
735
+ #----------------------------------------------------------------------------
736
+
737
+ if __name__ == "__main__":
738
+ execute_cmdline(sys.argv)
739
+
740
+ #----------------------------------------------------------------------------
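Since execute_cmdline() dispatches each sub-command to the module-level function of the same name via globals(), the tool can also be called programmatically; a hedged sketch with placeholder paths:

import dataset_tool

# Equivalent to: python dataset_tool.py create_from_images datasets/mydataset myimagedir
dataset_tool.create_from_images(
    tfrecord_dir='datasets/mydataset',   # new dataset directory (placeholder)
    image_dir='myimagedir',              # square, power-of-two RGB or grayscale images
    shuffle=1)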
models/pggan_tf_official/legacy.py ADDED
@@ -0,0 +1,117 @@
1
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
4
+ # 4.0 International License. To view a copy of this license, visit
5
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7
+
8
+ import pickle
9
+ import inspect
10
+ import numpy as np
11
+
12
+ import tfutil
13
+ import networks
14
+
15
+ #----------------------------------------------------------------------------
16
+ # Custom unpickler that is able to load network pickles produced by
17
+ # the old Theano implementation.
18
+
19
+ class LegacyUnpickler(pickle.Unpickler):
20
+ def __init__(self, *args, **kwargs):
21
+ super().__init__(*args, **kwargs)
22
+
23
+ def find_class(self, module, name):
24
+ if module == 'network' and name == 'Network':
25
+ return tfutil.Network
26
+ return super().find_class(module, name)
27
+
28
+ #----------------------------------------------------------------------------
29
+ # Import handler for tfutil.Network that silently converts networks produced
30
+ # by the old Theano implementation to a suitable format.
31
+
32
+ theano_gan_remap = {
33
+ 'G_paper': 'G_paper',
34
+ 'G_progressive_8': 'G_paper',
35
+ 'D_paper': 'D_paper',
36
+ 'D_progressive_8': 'D_paper'}
37
+
38
+ def patch_theano_gan(state):
39
+ if 'version' in state or state['build_func_spec']['func'] not in theano_gan_remap:
40
+ return state
41
+
42
+ spec = dict(state['build_func_spec'])
43
+ func = spec.pop('func')
44
+ resolution = spec.get('resolution', 32)
45
+ resolution_log2 = int(np.log2(resolution))
46
+ use_wscale = spec.get('use_wscale', True)
47
+
48
+ assert spec.pop('label_size', 0) == 0
49
+ assert spec.pop('use_batchnorm', False) == False
50
+ assert spec.pop('tanh_at_end', None) is None
51
+ assert spec.pop('mbstat_func', 'Tstdeps') == 'Tstdeps'
52
+ assert spec.pop('mbstat_avg', 'all') == 'all'
53
+ assert spec.pop('mbdisc_kernels', None) is None
54
+ spec.pop( 'use_gdrop', True) # doesn't make a difference
55
+ assert spec.pop('use_layernorm', False) == False
56
+ spec[ 'fused_scale'] = False
57
+ spec[ 'mbstd_group_size'] = 16
58
+
59
+ vars = []
60
+ param_iter = iter(state['param_values'])
61
+ relu = np.sqrt(2); linear = 1.0
62
+ def flatten2(w): return w.reshape(w.shape[0], -1)
63
+ def he_std(gain, w): return gain / np.sqrt(np.prod(w.shape[:-1]))
64
+ def wscale(gain, w): return w * next(param_iter) / he_std(gain, w) if use_wscale else w
65
+ def layer(name, gain, w): return [(name + '/weight', wscale(gain, w)), (name + '/bias', next(param_iter))]
66
+
67
+ if func.startswith('G'):
68
+ vars += layer('4x4/Dense', relu/4, flatten2(next(param_iter).transpose(1,0,2,3)))
69
+ vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
70
+ for res in range(3, resolution_log2 + 1):
71
+ vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
72
+ vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
73
+ for lod in range(0, resolution_log2 - 1):
74
+ vars += layer('ToRGB_lod%d' % lod, linear, next(param_iter)[np.newaxis, np.newaxis])
75
+
76
+ if func.startswith('D'):
77
+ vars += layer('FromRGB_lod0', relu, next(param_iter)[np.newaxis, np.newaxis])
78
+ for res in range(resolution_log2, 2, -1):
79
+ vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
80
+ vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
81
+ vars += layer('FromRGB_lod%d' % (resolution_log2 - (res - 1)), relu, next(param_iter)[np.newaxis, np.newaxis])
82
+ vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
83
+ vars += layer('4x4/Dense0', relu, flatten2(next(param_iter)[:,:,::-1,::-1]).transpose())
84
+ vars += layer('4x4/Dense1', linear, next(param_iter))
85
+
86
+ vars += [('lod', state['toplevel_params']['cur_lod'])]
87
+
88
+ return {
89
+ 'version': 2,
90
+ 'name': func,
91
+ 'build_module_src': inspect.getsource(networks),
92
+ 'build_func_name': theano_gan_remap[func],
93
+ 'static_kwargs': spec,
94
+ 'variables': vars}
95
+
96
+ tfutil.network_import_handlers.append(patch_theano_gan)
97
+
98
+ #----------------------------------------------------------------------------
99
+ # Import handler for tfutil.Network that ignores unsupported/deprecated
100
+ # networks produced by older versions of the code.
101
+
102
+ def ignore_unknown_theano_network(state):
103
+ if 'version' in state:
104
+ return state
105
+
106
+ print('Ignoring unknown Theano network:', state['build_func_spec']['func'])
107
+ return {
108
+ 'version': 2,
109
+ 'name': 'Dummy',
110
+ 'build_module_src': 'def dummy(input, **kwargs): input.set_shape([None, 1]); return input',
111
+ 'build_func_name': 'dummy',
112
+ 'static_kwargs': {},
113
+ 'variables': []}
114
+
115
+ tfutil.network_import_handlers.append(ignore_unknown_theano_network)
116
+
117
+ #----------------------------------------------------------------------------
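The two handlers registered above run during tfutil.Network unpickling, so importing this module for its side effects is the essential step; a minimal, hedged sketch of loading an old Theano-era pickle (the filename is a placeholder):

import legacy   # registers both import handlers at import time

with open('old_theano_network.pkl', 'rb') as file:               # placeholder path
    G = legacy.LegacyUnpickler(file, encoding='latin1').load()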
models/pggan_tf_official/loss.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
4
+ # 4.0 International License. To view a copy of this license, visit
5
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7
+
8
+ import numpy as np
9
+ import tensorflow as tf
10
+
11
+ import tfutil
12
+
13
+ #----------------------------------------------------------------------------
14
+ # Convenience func that casts all of its arguments to tf.float32.
15
+
16
+ def fp32(*values):
17
+ if len(values) == 1 and isinstance(values[0], tuple):
18
+ values = values[0]
19
+ values = tuple(tf.cast(v, tf.float32) for v in values)
20
+ return values if len(values) >= 2 else values[0]
21
+
22
+ #----------------------------------------------------------------------------
23
+ # Generator loss function used in the paper (WGAN + AC-GAN).
24
+
25
+ def G_wgan_acgan(G, D, opt, training_set, minibatch_size,
26
+ cond_weight = 1.0): # Weight of the conditioning term.
27
+
28
+ latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
29
+ labels = training_set.get_random_labels_tf(minibatch_size)
30
+ fake_images_out = G.get_output_for(latents, labels, is_training=True)
31
+ fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
32
+ loss = -fake_scores_out
33
+
34
+ if D.output_shapes[1][1] > 0:
35
+ with tf.name_scope('LabelPenalty'):
36
+ label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
37
+ loss += label_penalty_fakes * cond_weight
38
+ return loss
39
+
40
+ #----------------------------------------------------------------------------
41
+ # Discriminator loss function used in the paper (WGAN-GP + AC-GAN).
42
+
43
+ def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels,
44
+ wgan_lambda = 10.0, # Weight for the gradient penalty term.
45
+ wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}.
46
+ wgan_target = 1.0, # Target value for gradient magnitudes.
47
+ cond_weight = 1.0): # Weight of the conditioning terms.
48
+
49
+ latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
50
+ fake_images_out = G.get_output_for(latents, labels, is_training=True)
51
+ real_scores_out, real_labels_out = fp32(D.get_output_for(reals, is_training=True))
52
+ fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
53
+ real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
54
+ fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
55
+ loss = fake_scores_out - real_scores_out
56
+
57
+ with tf.name_scope('GradientPenalty'):
58
+ mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
59
+ mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
60
+ mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, is_training=True))
61
+ mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
62
+ mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
63
+ mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
64
+ mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
65
+ mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
66
+ gradient_penalty = tf.square(mixed_norms - wgan_target)
67
+ loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
68
+
69
+ with tf.name_scope('EpsilonPenalty'):
70
+ epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
71
+ loss += epsilon_penalty * wgan_epsilon
72
+
73
+ if D.output_shapes[1][1] > 0:
74
+ with tf.name_scope('LabelPenalty'):
75
+ label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
76
+ label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
77
+ label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
78
+ label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
79
+ loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
80
+ return loss
81
+
82
+ #----------------------------------------------------------------------------
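To make the gradient-penalty term above concrete, a small NumPy illustration of the same arithmetic, loss += (||grad D(x_mixed)|| - wgan_target)^2 * wgan_lambda / wgan_target^2, on made-up gradient values:

import numpy as np

wgan_lambda, wgan_target = 10.0, 1.0
mixed_grads = np.array([[0.3, -0.4], [1.2, 0.9]])               # pretend gradients for 2 samples
mixed_norms = np.sqrt(np.sum(np.square(mixed_grads), axis=1))   # [0.5, 1.5]
gradient_penalty = np.square(mixed_norms - wgan_target)         # [0.25, 0.25]
print(gradient_penalty * (wgan_lambda / wgan_target**2))        # [2.5, 2.5], added per sample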
models/pggan_tf_official/metrics/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # empty
models/pggan_tf_official/metrics/frechet_inception_distance.py ADDED
@@ -0,0 +1,281 @@
1
+ #!/usr/bin/env python3
2
+ #
3
+ # Copyright 2017 Martin Heusel
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Adapted from the original implementation by Martin Heusel.
18
+ # Source https://github.com/bioinf-jku/TTUR/blob/master/fid.py
19
+
20
+ ''' Calculates the Frechet Inception Distance (FID) to evaluate GANs.
21
+
22
+ The FID metric calculates the distance between two distributions of images.
23
+ Typically, we have summary statistics (mean & covariance matrix) of one
24
+ of these distributions, while the 2nd distribution is given by a GAN.
25
+
26
+ When run as a stand-alone program, it compares the distribution of
27
+ images that are stored as PNG/JPEG at a specified location with a
28
+ distribution given by summary statistics (in pickle format).
29
+
30
+ The FID is calculated by assuming that X_1 and X_2 are the activations of
31
+ the pool_3 layer of the inception net for generated samples and real-world
32
+ samples respectively.
33
+
34
+ See --help to see further details.
35
+ '''
36
+
37
+ from __future__ import absolute_import, division, print_function
38
+ import numpy as np
39
+ import scipy as sp
40
+ import os
41
+ import gzip, pickle
42
+ import tensorflow as tf
43
+ from scipy.misc import imread
44
+ import pathlib
45
+ import urllib
46
+
47
+
48
+ class InvalidFIDException(Exception):
49
+ pass
50
+
51
+
52
+ def create_inception_graph(pth):
53
+ """Creates a graph from saved GraphDef file."""
54
+ # Creates graph from saved graph_def.pb.
55
+ with tf.gfile.FastGFile( pth, 'rb') as f:
56
+ graph_def = tf.GraphDef()
57
+ graph_def.ParseFromString( f.read())
58
+ _ = tf.import_graph_def( graph_def, name='FID_Inception_Net')
59
+ #-------------------------------------------------------------------------------
60
+
61
+
62
+ # code for handling inception net derived from
63
+ # https://github.com/openai/improved-gan/blob/master/inception_score/model.py
64
+ def _get_inception_layer(sess):
65
+ """Prepares inception net for batched usage and returns pool_3 layer. """
66
+ layername = 'FID_Inception_Net/pool_3:0'
67
+ pool3 = sess.graph.get_tensor_by_name(layername)
68
+ ops = pool3.graph.get_operations()
69
+ for op_idx, op in enumerate(ops):
70
+ for o in op.outputs:
71
+ shape = o.get_shape()
72
+ if shape._dims is not None:
73
+ shape = [s.value for s in shape]
74
+ new_shape = []
75
+ for j, s in enumerate(shape):
76
+ if s == 1 and j == 0:
77
+ new_shape.append(None)
78
+ else:
79
+ new_shape.append(s)
80
+ try:
81
+ o._shape = tf.TensorShape(new_shape)
82
+ except ValueError:
83
+ o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0
84
+ return pool3
85
+ #-------------------------------------------------------------------------------
86
+
87
+
88
+ def get_activations(images, sess, batch_size=50, verbose=False):
89
+ """Calculates the activations of the pool_3 layer for all images.
90
+
91
+ Params:
92
+ -- images : Numpy array of dimension (n_images, hi, wi, 3). The values
93
+ must lie between 0 and 255.
94
+ -- sess : current session
95
+ -- batch_size : the images numpy array is split into batches with batch size
96
+ batch_size. A reasonable batch size depends on the available hardware.
97
+ -- verbose : If set to True, the number of calculated
98
+ batches is reported.
99
+ Returns:
100
+ -- A numpy array of dimension (num images, 2048) that contains the
101
+ activations of the given tensor when feeding inception with the query tensor.
102
+ """
103
+ inception_layer = _get_inception_layer(sess)
104
+ d0 = images.shape[0]
105
+ if batch_size > d0:
106
+ print("warning: batch size is bigger than the data size. setting batch size to data size")
107
+ batch_size = d0
108
+ n_batches = d0//batch_size
109
+ n_used_imgs = n_batches*batch_size
110
+ pred_arr = np.empty((n_used_imgs,2048))
111
+ for i in range(n_batches):
112
+ if verbose:
113
+ print("\rPropagating batch %d/%d" % (i+1, n_batches), end="", flush=True)
114
+ start = i*batch_size
115
+ end = start + batch_size
116
+ batch = images[start:end]
117
+ pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
118
+ pred_arr[start:end] = pred.reshape(batch_size,-1)
119
+ if verbose:
120
+ print(" done")
121
+ return pred_arr
122
+ #-------------------------------------------------------------------------------
123
+
124
+
125
+ def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
126
+ """Numpy implementation of the Frechet Distance.
127
+ The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
128
+ and X_2 ~ N(mu_2, C_2) is
129
+ d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
130
+
131
+ Params:
132
+ -- mu1 : The sample mean over activations of the pool_3 layer of the
133
+ inception net, computed on generated samples.
134
+ -- mu2 : The sample mean over activations of the pool_3 layer, precalculated
135
+ on a representative data set.
136
+ -- sigma2: The covariance matrix over activations of the pool_3 layer,
137
+ precalculated on a representative data set.
138
+
139
+ Returns:
140
+ -- dist : The Frechet Distance.
141
+
142
+ Raises:
143
+ -- InvalidFIDException if NaN occurs.
144
+ """
145
+ m = np.square(mu1 - mu2).sum()
146
+ #s = sp.linalg.sqrtm(np.dot(sigma1, sigma2)) # EDIT: commented out
147
+ s, _ = sp.linalg.sqrtm(np.dot(sigma1, sigma2), disp=False) # EDIT: added
148
+ dist = m + np.trace(sigma1+sigma2 - 2*s)
149
+ #if np.isnan(dist): # EDIT: commented out
150
+ # raise InvalidFIDException("nan occured in distance calculation.") # EDIT: commented out
151
+ #return dist # EDIT: commented out
152
+ return np.real(dist) # EDIT: added
153
+ #-------------------------------------------------------------------------------
154
+
155
+
156
+ def calculate_activation_statistics(images, sess, batch_size=50, verbose=False):
157
+ """Calculation of the statistics used by the FID.
158
+ Params:
159
+ -- images : Numpy array of dimension (n_images, hi, wi, 3). The values
160
+ must lie between 0 and 255.
161
+ -- sess : current session
162
+ -- batch_size : the images numpy array is split into batches with batch size
163
+ batch_size. A reasonable batch size depends on the available hardware.
164
+ -- verbose : If set to True, the number of calculated
165
+ batches is reported.
166
+ Returns:
167
+ -- mu : The mean over samples of the activations of the pool_3 layer of
168
+ the inception model.
169
+ -- sigma : The covariance matrix of the activations of the pool_3 layer of
170
+ the inception model.
171
+ """
172
+ act = get_activations(images, sess, batch_size, verbose)
173
+ mu = np.mean(act, axis=0)
174
+ sigma = np.cov(act, rowvar=False)
175
+ return mu, sigma
176
+ #-------------------------------------------------------------------------------
177
+
178
+
179
+ #-------------------------------------------------------------------------------
180
+ # The following functions aren't needed for calculating the FID
181
+ # they're just here to make this module work as a stand-alone script
182
+ # for calculating FID scores
183
+ #-------------------------------------------------------------------------------
184
+ def check_or_download_inception(inception_path):
185
+ ''' Checks if the path to the inception file is valid, or downloads
186
+ the file if it is not present. '''
187
+ INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
188
+ if inception_path is None:
189
+ inception_path = '/tmp'
190
+ inception_path = pathlib.Path(inception_path)
191
+ model_file = inception_path / 'classify_image_graph_def.pb'
192
+ if not model_file.exists():
193
+ print("Downloading Inception model")
194
+ from urllib import request
195
+ import tarfile
196
+ fn, _ = request.urlretrieve(INCEPTION_URL)
197
+ with tarfile.open(fn, mode='r') as f:
198
+ f.extract('classify_image_graph_def.pb', str(model_file.parent))
199
+ return str(model_file)
200
+
201
+
202
+ def _handle_path(path, sess):
203
+ if path.endswith('.npz'):
204
+ f = np.load(path)
205
+ m, s = f['mu'][:], f['sigma'][:]
206
+ f.close()
207
+ else:
208
+ path = pathlib.Path(path)
209
+ files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
210
+ x = np.array([imread(str(fn)).astype(np.float32) for fn in files])
211
+ m, s = calculate_activation_statistics(x, sess)
212
+ return m, s
213
+
214
+
215
+ def calculate_fid_given_paths(paths, inception_path):
216
+ ''' Calculates the FID of two paths. '''
217
+ inception_path = check_or_download_inception(inception_path)
218
+
219
+ for p in paths:
220
+ if not os.path.exists(p):
221
+ raise RuntimeError("Invalid path: %s" % p)
222
+
223
+ create_inception_graph(str(inception_path))
224
+ with tf.Session() as sess:
225
+ sess.run(tf.global_variables_initializer())
226
+ m1, s1 = _handle_path(paths[0], sess)
227
+ m2, s2 = _handle_path(paths[1], sess)
228
+ fid_value = calculate_frechet_distance(m1, s1, m2, s2)
229
+ return fid_value
230
+
231
+
232
+ if __name__ == "__main__":
233
+ from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
234
+ parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
235
+ parser.add_argument("path", type=str, nargs=2,
236
+ help='Path to the generated images or to .npz statistic files')
237
+ parser.add_argument("-i", "--inception", type=str, default=None,
238
+ help='Path to Inception model (will be downloaded if not provided)')
239
+ parser.add_argument("--gpu", default="", type=str,
240
+ help='GPU to use (leave blank for CPU only)')
241
+ args = parser.parse_args()
242
+ os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
243
+ fid_value = calculate_fid_given_paths(args.path, args.inception)
244
+ print("FID: ", fid_value)
245
+
246
+ #----------------------------------------------------------------------------
247
+ # EDIT: added
248
+
249
+ class API:
250
+ def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
251
+ import config
252
+ self.network_dir = os.path.join(config.result_dir, '_inception_fid')
253
+ self.network_file = check_or_download_inception(self.network_dir)
254
+ self.sess = tf.get_default_session()
255
+ create_inception_graph(self.network_file)
256
+
257
+ def get_metric_names(self):
258
+ return ['FID']
259
+
260
+ def get_metric_formatting(self):
261
+ return ['%-10.4f']
262
+
263
+ def begin(self, mode):
264
+ assert mode in ['warmup', 'reals', 'fakes']
265
+ self.activations = []
266
+
267
+ def feed(self, mode, minibatch):
268
+ act = get_activations(minibatch.transpose(0,2,3,1), self.sess, batch_size=minibatch.shape[0])
269
+ self.activations.append(act)
270
+
271
+ def end(self, mode):
272
+ act = np.concatenate(self.activations)
273
+ mu = np.mean(act, axis=0)
274
+ sigma = np.cov(act, rowvar=False)
275
+ if mode in ['warmup', 'reals']:
276
+ self.mu_real = mu
277
+ self.sigma_real = sigma
278
+ fid = calculate_frechet_distance(mu, sigma, self.mu_real, self.sigma_real)
279
+ return [fid]
280
+
281
+ #----------------------------------------------------------------------------
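The Frechet distance above reduces to a few NumPy/SciPy calls once the pool_3 statistics exist. As a minimal standalone sketch (toy 4-dimensional statistics instead of real 2048-dimensional Inception activations; the helper name frechet_distance is illustrative, not part of this repo):

import numpy as np
import scipy.linalg

def frechet_distance(mu1, sigma1, mu2, sigma2):
    # Illustrative re-statement of calculate_frechet_distance() above.
    # Squared L2 distance between the means.
    diff = np.square(mu1 - mu2).sum()
    # Matrix square root of the covariance product; disp=False returns
    # (sqrtm_result, error_estimate) instead of printing a warning.
    covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    return np.real(diff + np.trace(sigma1 + sigma2 - 2.0 * covmean))

rng = np.random.RandomState(0)
a = rng.randn(100, 4)           # stand-in for activations of real images
b = rng.randn(100, 4) + 1.0     # stand-in for activations of fake images
fid = frechet_distance(a.mean(0), np.cov(a, rowvar=False),
                       b.mean(0), np.cov(b, rowvar=False))
print('toy FID: %.4f' % fid)    # identical statistics would give ~0.0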
models/pggan_tf_official/metrics/inception_score.py ADDED
@@ -0,0 +1,147 @@
1
+ # Copyright 2016 Wojciech Zaremba
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Adapted from the original implementation by Wojciech Zaremba.
16
+ # Source: https://github.com/openai/improved-gan/blob/master/inception_score/model.py
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ import os.path
23
+ import sys
24
+ import tarfile
25
+
26
+ import numpy as np
27
+ from six.moves import urllib
28
+ import tensorflow as tf
29
+ import glob
30
+ import scipy.misc
31
+ import math
32
+ import sys
33
+
34
+ MODEL_DIR = '/tmp/imagenet'
35
+
36
+ DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
37
+ softmax = None
38
+
39
+ # Call this function with a list of images. Each element should be a
40
+ # numpy array with values ranging from 0 to 255.
41
+ def get_inception_score(images, splits=10):
42
+ assert(type(images) == list)
43
+ assert(type(images[0]) == np.ndarray)
44
+ assert(len(images[0].shape) == 3)
45
+ #assert(np.max(images[0]) > 10) # EDIT: commented out
46
+ #assert(np.min(images[0]) >= 0.0)
47
+ inps = []
48
+ for img in images:
49
+ img = img.astype(np.float32)
50
+ inps.append(np.expand_dims(img, 0))
51
+ bs = 100
52
+ with tf.Session() as sess:
53
+ preds = []
54
+ n_batches = int(math.ceil(float(len(inps)) / float(bs)))
55
+ for i in range(n_batches):
56
+ #sys.stdout.write(".") # EDIT: commented out
57
+ #sys.stdout.flush()
58
+ inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
59
+ inp = np.concatenate(inp, 0)
60
+ pred = sess.run(softmax, {'ExpandDims:0': inp})
61
+ preds.append(pred)
62
+ preds = np.concatenate(preds, 0)
63
+ scores = []
64
+ for i in range(splits):
65
+ part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
66
+ kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
67
+ kl = np.mean(np.sum(kl, 1))
68
+ scores.append(np.exp(kl))
69
+ return np.mean(scores), np.std(scores)
70
+
71
+ # This function is called automatically.
72
+ def _init_inception():
73
+ global softmax
74
+ if not os.path.exists(MODEL_DIR):
75
+ os.makedirs(MODEL_DIR)
76
+ filename = DATA_URL.split('/')[-1]
77
+ filepath = os.path.join(MODEL_DIR, filename)
78
+ if not os.path.exists(filepath):
79
+ def _progress(count, block_size, total_size):
80
+ sys.stdout.write('\r>> Downloading %s %.1f%%' % (
81
+ filename, float(count * block_size) / float(total_size) * 100.0))
82
+ sys.stdout.flush()
83
+ filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
84
+ print()
85
+ statinfo = os.stat(filepath)
86
+ print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
87
+ tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR) # EDIT: increased indent
88
+ with tf.gfile.FastGFile(os.path.join(
89
+ MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
90
+ graph_def = tf.GraphDef()
91
+ graph_def.ParseFromString(f.read())
92
+ _ = tf.import_graph_def(graph_def, name='')
93
+ # Works with an arbitrary minibatch size.
94
+ with tf.Session() as sess:
95
+ pool3 = sess.graph.get_tensor_by_name('pool_3:0')
96
+ ops = pool3.graph.get_operations()
97
+ for op_idx, op in enumerate(ops):
98
+ for o in op.outputs:
99
+ shape = o.get_shape()
100
+ shape = [s.value for s in shape]
101
+ new_shape = []
102
+ for j, s in enumerate(shape):
103
+ if s == 1 and j == 0:
104
+ new_shape.append(None)
105
+ else:
106
+ new_shape.append(s)
107
+ try:
108
+ o._shape = tf.TensorShape(new_shape)
109
+ except ValueError:
110
+ o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0
111
+ w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
112
+ logits = tf.matmul(tf.squeeze(pool3), w)
113
+ softmax = tf.nn.softmax(logits)
114
+
115
+ #if softmax is None: # EDIT: commented out
116
+ # _init_inception() # EDIT: commented out
117
+
118
+ #----------------------------------------------------------------------------
119
+ # EDIT: added
120
+
121
+ class API:
122
+ def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
123
+ import config
124
+ globals()['MODEL_DIR'] = os.path.join(config.result_dir, '_inception')
125
+ self.sess = tf.get_default_session()
126
+ _init_inception()
127
+
128
+ def get_metric_names(self):
129
+ return ['IS_mean', 'IS_std']
130
+
131
+ def get_metric_formatting(self):
132
+ return ['%-10.4f', '%-10.4f']
133
+
134
+ def begin(self, mode):
135
+ assert mode in ['warmup', 'reals', 'fakes']
136
+ self.images = []
137
+
138
+ def feed(self, mode, minibatch):
139
+ self.images.append(minibatch.transpose(0, 2, 3, 1))
140
+
141
+ def end(self, mode):
142
+ images = list(np.concatenate(self.images))
143
+ with self.sess.as_default():
144
+ mean, std = get_inception_score(images)
145
+ return [mean, std]
146
+
147
+ #----------------------------------------------------------------------------
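The score itself is simply exp of the mean KL divergence between each image's class posterior p(y|x) and the marginal p(y), computed per split. A self-contained sketch of that final step, assuming preds is an (N, num_classes) matrix of softmax outputs (the helper name is illustrative, not part of this repo):

import numpy as np

def inception_score_from_preds(preds, splits=10):
    # preds: (N, num_classes) softmax probabilities; rows sum to 1.
    scores = []
    for i in range(splits):
        part = preds[i * len(preds) // splits:(i + 1) * len(preds) // splits]
        # KL(p(y|x) || p(y)) for each row, averaged over the split.
        kl = part * (np.log(part) - np.log(part.mean(axis=0, keepdims=True)))
        scores.append(np.exp(kl.sum(axis=1).mean()))
    return np.mean(scores), np.std(scores)

# Uniform predictions carry no class information, giving the minimum score of 1.0.
print(inception_score_from_preds(np.full((1000, 10), 0.1)))  # ~ (1.0, 0.0)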
models/pggan_tf_official/metrics/ms_ssim.py ADDED
@@ -0,0 +1,200 @@
1
+ #!/usr/bin/python
2
+ #
3
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ # ==============================================================================
17
+
18
+ # Adapted from the original implementation by The TensorFlow Authors.
19
+ # Source: https://github.com/tensorflow/models/blob/master/research/compression/image_encoder/msssim.py
20
+
21
+ import numpy as np
22
+ from scipy import signal
23
+ from scipy.ndimage.filters import convolve
24
+
25
+ def _FSpecialGauss(size, sigma):
26
+ """Function to mimic the 'fspecial' gaussian MATLAB function."""
27
+ radius = size // 2
28
+ offset = 0.0
29
+ start, stop = -radius, radius + 1
30
+ if size % 2 == 0:
31
+ offset = 0.5
32
+ stop -= 1
33
+ x, y = np.mgrid[offset + start:stop, offset + start:stop]
34
+ assert len(x) == size
35
+ g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))
36
+ return g / g.sum()
37
+
38
+ def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
39
+ """Return the Structural Similarity Map between `img1` and `img2`.
40
+
41
+ This function attempts to match the functionality of ssim_index_new.m by
42
+ Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
43
+
44
+ Arguments:
45
+ img1: Numpy array holding the first RGB image batch.
46
+ img2: Numpy array holding the second RGB image batch.
47
+ max_val: the dynamic range of the images (i.e., the difference between the
48
+ maximum the and minimum allowed values).
49
+ filter_size: Size of blur kernel to use (will be reduced for small images).
50
+ filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
51
+ for small images).
52
+ k1: Constant used to maintain stability in the SSIM calculation (0.01 in
53
+ the original paper).
54
+ k2: Constant used to maintain stability in the SSIM calculation (0.03 in
55
+ the original paper).
56
+
57
+ Returns:
58
+ Pair containing the mean SSIM and contrast sensitivity between `img1` and
59
+ `img2`.
60
+
61
+ Raises:
62
+ RuntimeError: If input images don't have the same shape or don't have four
63
+ dimensions: [batch_size, height, width, depth].
64
+ """
65
+ if img1.shape != img2.shape:
66
+ raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
67
+ if img1.ndim != 4:
68
+ raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
69
+
70
+ img1 = img1.astype(np.float32)
71
+ img2 = img2.astype(np.float32)
72
+ _, height, width, _ = img1.shape
73
+
74
+ # Filter size can't be larger than height or width of images.
75
+ size = min(filter_size, height, width)
76
+
77
+ # Scale down sigma if a smaller filter size is used.
78
+ sigma = size * filter_sigma / filter_size if filter_size else 0
79
+
80
+ if filter_size:
81
+ window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
82
+ mu1 = signal.fftconvolve(img1, window, mode='valid')
83
+ mu2 = signal.fftconvolve(img2, window, mode='valid')
84
+ sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
85
+ sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
86
+ sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
87
+ else:
88
+ # Empty blur kernel so no need to convolve.
89
+ mu1, mu2 = img1, img2
90
+ sigma11 = img1 * img1
91
+ sigma22 = img2 * img2
92
+ sigma12 = img1 * img2
93
+
94
+ mu11 = mu1 * mu1
95
+ mu22 = mu2 * mu2
96
+ mu12 = mu1 * mu2
97
+ sigma11 -= mu11
98
+ sigma22 -= mu22
99
+ sigma12 -= mu12
100
+
101
+ # Calculate intermediate values used by both ssim and cs_map.
102
+ c1 = (k1 * max_val) ** 2
103
+ c2 = (k2 * max_val) ** 2
104
+ v1 = 2.0 * sigma12 + c2
105
+ v2 = sigma11 + sigma22 + c2
106
+ ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)), axis=(1, 2, 3)) # Return for each image individually.
107
+ cs = np.mean(v1 / v2, axis=(1, 2, 3))
108
+ return ssim, cs
109
+
110
+ def _HoxDownsample(img):
111
+ return (img[:, 0::2, 0::2, :] + img[:, 1::2, 0::2, :] + img[:, 0::2, 1::2, :] + img[:, 1::2, 1::2, :]) * 0.25
112
+
113
+ def msssim(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03, weights=None):
114
+ """Return the MS-SSIM score between `img1` and `img2`.
115
+
116
+ This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
117
+ Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
118
+ similarity for image quality assessment" (2003).
119
+ Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
120
+
121
+ Author's MATLAB implementation:
122
+ http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
123
+
124
+ Arguments:
125
+ img1: Numpy array holding the first RGB image batch.
126
+ img2: Numpy array holding the second RGB image batch.
127
+ max_val: the dynamic range of the images (i.e., the difference between the
128
+ maximum the and minimum allowed values).
129
+ filter_size: Size of blur kernel to use (will be reduced for small images).
130
+ filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
131
+ for small images).
132
+ k1: Constant used to maintain stability in the SSIM calculation (0.01 in
133
+ the original paper).
134
+ k2: Constant used to maintain stability in the SSIM calculation (0.03 in
135
+ the original paper).
136
+ weights: List of weights for each level; if none, use five levels and the
137
+ weights from the original paper.
138
+
139
+ Returns:
140
+ MS-SSIM score between `img1` and `img2`.
141
+
142
+ Raises:
143
+ RuntimeError: If input images don't have the same shape or don't have four
144
+ dimensions: [batch_size, height, width, depth].
145
+ """
146
+ if img1.shape != img2.shape:
147
+ raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
148
+ if img1.ndim != 4:
149
+ raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
150
+
151
+ # Note: default weights don't sum to 1.0 but do match the paper / matlab code.
152
+ weights = np.array(weights if weights else [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
153
+ levels = weights.size
154
+ downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
155
+ im1, im2 = [x.astype(np.float32) for x in [img1, img2]]
156
+ mssim = []
157
+ mcs = []
158
+ for _ in range(levels):
159
+ ssim, cs = _SSIMForMultiScale(
160
+ im1, im2, max_val=max_val, filter_size=filter_size,
161
+ filter_sigma=filter_sigma, k1=k1, k2=k2)
162
+ mssim.append(ssim)
163
+ mcs.append(cs)
164
+ im1, im2 = [_HoxDownsample(x) for x in [im1, im2]]
165
+
166
+ # Clip to zero. Otherwise we get NaNs.
167
+ mssim = np.clip(np.asarray(mssim), 0.0, np.inf)
168
+ mcs = np.clip(np.asarray(mcs), 0.0, np.inf)
169
+
170
+ # Average over images only at the end.
171
+ return np.mean(np.prod(mcs[:-1, :] ** weights[:-1, np.newaxis], axis=0) * (mssim[-1, :] ** weights[-1]))
172
+
173
+ #----------------------------------------------------------------------------
174
+ # EDIT: added
175
+
176
+ class API:
177
+ def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
178
+ assert num_images % 2 == 0 and minibatch_size % 2 == 0
179
+ self.num_pairs = num_images // 2
180
+
181
+ def get_metric_names(self):
182
+ return ['MS-SSIM']
183
+
184
+ def get_metric_formatting(self):
185
+ return ['%-10.4f']
186
+
187
+ def begin(self, mode):
188
+ assert mode in ['warmup', 'reals', 'fakes']
189
+ self.sum = 0.0
190
+
191
+ def feed(self, mode, minibatch):
192
+ images = minibatch.transpose(0, 2, 3, 1)
193
+ score = msssim(images[0::2], images[1::2])
194
+ self.sum += score * (images.shape[0] // 2)
195
+
196
+ def end(self, mode):
197
+ avg = self.sum / self.num_pairs
198
+ return [avg]
199
+
200
+ #----------------------------------------------------------------------------
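Usage note: msssim expects two batches of identical shape in NHWC layout and returns one scalar averaged over the batch. A quick sanity check, assuming the functions above are in scope (identical batches must score exactly 1.0):

import numpy as np

rng = np.random.RandomState(0)
batch = rng.randint(0, 256, size=(2, 128, 128, 3)).astype(np.uint8)
print(msssim(batch, batch))  # identical images -> 1.0
noisy = np.clip(batch.astype(np.int32) + rng.randint(-20, 21, batch.shape), 0, 255)
print(msssim(batch, noisy.astype(np.uint8)))  # slightly below 1.0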
models/pggan_tf_official/metrics/sliced_wasserstein.py ADDED
@@ -0,0 +1,135 @@
1
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
4
+ # 4.0 International License. To view a copy of this license, visit
5
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7
+
8
+ import numpy as np
9
+ import scipy.ndimage
10
+
11
+ #----------------------------------------------------------------------------
12
+
13
+ def get_descriptors_for_minibatch(minibatch, nhood_size, nhoods_per_image):
14
+ S = minibatch.shape # (minibatch, channel, height, width)
15
+ assert len(S) == 4 and S[1] == 3
16
+ N = nhoods_per_image * S[0]
17
+ H = nhood_size // 2
18
+ nhood, chan, x, y = np.ogrid[0:N, 0:3, -H:H+1, -H:H+1]
19
+ img = nhood // nhoods_per_image
20
+ x = x + np.random.randint(H, S[3] - H, size=(N, 1, 1, 1))
21
+ y = y + np.random.randint(H, S[2] - H, size=(N, 1, 1, 1))
22
+ idx = ((img * S[1] + chan) * S[2] + y) * S[3] + x
23
+ return minibatch.flat[idx]
24
+
25
+ #----------------------------------------------------------------------------
26
+
27
+ def finalize_descriptors(desc):
28
+ if isinstance(desc, list):
29
+ desc = np.concatenate(desc, axis=0)
30
+ assert desc.ndim == 4 # (neighborhood, channel, height, width)
31
+ desc -= np.mean(desc, axis=(0, 2, 3), keepdims=True)
32
+ desc /= np.std(desc, axis=(0, 2, 3), keepdims=True)
33
+ desc = desc.reshape(desc.shape[0], -1)
34
+ return desc
35
+
36
+ #----------------------------------------------------------------------------
37
+
38
+ def sliced_wasserstein(A, B, dir_repeats, dirs_per_repeat):
39
+ assert A.ndim == 2 and A.shape == B.shape # (neighborhood, descriptor_component)
40
+ results = []
41
+ for repeat in range(dir_repeats):
42
+ dirs = np.random.randn(A.shape[1], dirs_per_repeat) # (descriptor_component, direction)
43
+ dirs /= np.sqrt(np.sum(np.square(dirs), axis=0, keepdims=True)) # normalize descriptor components for each direction
44
+ dirs = dirs.astype(np.float32)
45
+ projA = np.matmul(A, dirs) # (neighborhood, direction)
46
+ projB = np.matmul(B, dirs)
47
+ projA = np.sort(projA, axis=0) # sort neighborhood projections for each direction
48
+ projB = np.sort(projB, axis=0)
49
+ dists = np.abs(projA - projB) # pointwise wasserstein distances
50
+ results.append(np.mean(dists)) # average over neighborhoods and directions
51
+ return np.mean(results) # average over repeats
52
+
53
+ #----------------------------------------------------------------------------
54
+
55
+ def downscale_minibatch(minibatch, lod):
56
+ if lod == 0:
57
+ return minibatch
58
+ t = minibatch.astype(np.float32)
59
+ for i in range(lod):
60
+ t = (t[:, :, 0::2, 0::2] + t[:, :, 0::2, 1::2] + t[:, :, 1::2, 0::2] + t[:, :, 1::2, 1::2]) * 0.25
61
+ return np.round(t).clip(0, 255).astype(np.uint8)
62
+
63
+ #----------------------------------------------------------------------------
64
+
65
+ gaussian_filter = np.float32([
66
+ [1, 4, 6, 4, 1],
67
+ [4, 16, 24, 16, 4],
68
+ [6, 24, 36, 24, 6],
69
+ [4, 16, 24, 16, 4],
70
+ [1, 4, 6, 4, 1]]) / 256.0
71
+
72
+ def pyr_down(minibatch): # matches cv2.pyrDown()
73
+ assert minibatch.ndim == 4
74
+ return scipy.ndimage.convolve(minibatch, gaussian_filter[np.newaxis, np.newaxis, :, :], mode='mirror')[:, :, ::2, ::2]
75
+
76
+ def pyr_up(minibatch): # matches cv2.pyrUp()
77
+ assert minibatch.ndim == 4
78
+ S = minibatch.shape
79
+ res = np.zeros((S[0], S[1], S[2] * 2, S[3] * 2), minibatch.dtype)
80
+ res[:, :, ::2, ::2] = minibatch
81
+ return scipy.ndimage.convolve(res, gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0, mode='mirror')
82
+
83
+ def generate_laplacian_pyramid(minibatch, num_levels):
84
+ pyramid = [np.float32(minibatch)]
85
+ for i in range(1, num_levels):
86
+ pyramid.append(pyr_down(pyramid[-1]))
87
+ pyramid[-2] -= pyr_up(pyramid[-1])
88
+ return pyramid
89
+
90
+ def reconstruct_laplacian_pyramid(pyramid):
91
+ minibatch = pyramid[-1]
92
+ for level in pyramid[-2::-1]:
93
+ minibatch = pyr_up(minibatch) + level
94
+ return minibatch
95
+
96
+ #----------------------------------------------------------------------------
97
+
98
+ class API:
99
+ def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
100
+ self.nhood_size = 7
101
+ self.nhoods_per_image = 128
102
+ self.dir_repeats = 4
103
+ self.dirs_per_repeat = 128
104
+ self.resolutions = []
105
+ res = image_shape[1]
106
+ while res >= 16:
107
+ self.resolutions.append(res)
108
+ res //= 2
109
+
110
+ def get_metric_names(self):
111
+ return ['SWDx1e3_%d' % res for res in self.resolutions] + ['SWDx1e3_avg']
112
+
113
+ def get_metric_formatting(self):
114
+ return ['%-13.4f'] * len(self.get_metric_names())
115
+
116
+ def begin(self, mode):
117
+ assert mode in ['warmup', 'reals', 'fakes']
118
+ self.descriptors = [[] for res in self.resolutions]
119
+
120
+ def feed(self, mode, minibatch):
121
+ for lod, level in enumerate(generate_laplacian_pyramid(minibatch, len(self.resolutions))):
122
+ desc = get_descriptors_for_minibatch(level, self.nhood_size, self.nhoods_per_image)
123
+ self.descriptors[lod].append(desc)
124
+
125
+ def end(self, mode):
126
+ desc = [finalize_descriptors(d) for d in self.descriptors]
127
+ del self.descriptors
128
+ if mode in ['warmup', 'reals']:
129
+ self.desc_real = desc
130
+ dist = [sliced_wasserstein(dreal, dfake, self.dir_repeats, self.dirs_per_repeat) for dreal, dfake in zip(self.desc_real, desc)]
131
+ del desc
132
+ dist = [d * 1e3 for d in dist] # multiply by 10^3
133
+ return dist + [np.mean(dist)]
134
+
135
+ #----------------------------------------------------------------------------
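The heart of the metric is sliced_wasserstein: project both descriptor sets onto random unit directions, sort the 1-D projections, and average the absolute differences. A minimal sanity check on synthetic descriptors, assuming the functions above are in scope (identical sets give a distance of exactly 0):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(1024, 147).astype(np.float32)  # 147 = 3 channels * 7x7 neighborhood
B = rng.randn(1024, 147).astype(np.float32)
print(sliced_wasserstein(A, A, dir_repeats=4, dirs_per_repeat=128))  # 0.0
print(sliced_wasserstein(A, B, dir_repeats=4, dirs_per_repeat=128))  # > 0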
models/pggan_tf_official/misc.py ADDED
@@ -0,0 +1,344 @@
1
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
4
+ # 4.0 International License. To view a copy of this license, visit
5
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7
+
8
+ import os
9
+ import sys
10
+ import glob
11
+ import datetime
12
+ import pickle
13
+ import re
14
+ import numpy as np
15
+ from collections import OrderedDict
16
+ import scipy.ndimage
17
+ import PIL.Image
18
+
19
+ import config
20
+ import dataset
21
+ import legacy
22
+
23
+ #----------------------------------------------------------------------------
24
+ # Convenience wrappers for pickle that are able to load data produced by
25
+ # older versions of the code.
26
+
27
+ def load_pkl(filename):
28
+ with open(filename, 'rb') as file:
29
+ return legacy.LegacyUnpickler(file, encoding='latin1').load()
30
+
31
+ def save_pkl(obj, filename):
32
+ with open(filename, 'wb') as file:
33
+ pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
34
+
35
+ #----------------------------------------------------------------------------
36
+ # Image utils.
37
+
38
+ def adjust_dynamic_range(data, drange_in, drange_out):
39
+ if drange_in != drange_out:
40
+ scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
41
+ bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
42
+ data = data * scale + bias
43
+ return data
44
+
45
+ def create_image_grid(images, grid_size=None):
46
+ assert images.ndim == 3 or images.ndim == 4
47
+ num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]
48
+
49
+ if grid_size is not None:
50
+ grid_w, grid_h = tuple(grid_size)
51
+ else:
52
+ grid_w = max(int(np.ceil(np.sqrt(num))), 1)
53
+ grid_h = max((num - 1) // grid_w + 1, 1)
54
+
55
+ grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
56
+ for idx in range(num):
57
+ x = (idx % grid_w) * img_w
58
+ y = (idx // grid_w) * img_h
59
+ grid[..., y : y + img_h, x : x + img_w] = images[idx]
60
+ return grid
61
+
62
+ def convert_to_pil_image(image, drange=[0,1]):
63
+ assert image.ndim == 2 or image.ndim == 3
64
+ if image.ndim == 3:
65
+ if image.shape[0] == 1:
66
+ image = image[0] # grayscale CHW => HW
67
+ else:
68
+ image = image.transpose(1, 2, 0) # CHW -> HWC
69
+
70
+ image = adjust_dynamic_range(image, drange, [0,255])
71
+ image = np.rint(image).clip(0, 255).astype(np.uint8)
72
+ format = 'RGB' if image.ndim == 3 else 'L'
73
+ return PIL.Image.fromarray(image, format)
74
+
75
+ def save_image(image, filename, drange=[0,1], quality=95):
76
+ img = convert_to_pil_image(image, drange)
77
+ if '.jpg' in filename:
78
+ img.save(filename,"JPEG", quality=quality, optimize=True)
79
+ else:
80
+ img.save(filename)
81
+
82
+ def save_image_grid(images, filename, drange=[0,1], grid_size=None):
83
+ convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename)
84
+
85
+ #----------------------------------------------------------------------------
86
+ # Logging of stdout and stderr to a file.
87
+
88
+ class OutputLogger(object):
89
+ def __init__(self):
90
+ self.file = None
91
+ self.buffer = ''
92
+
93
+ def set_log_file(self, filename, mode='wt'):
94
+ assert self.file is None
95
+ self.file = open(filename, mode)
96
+ if self.buffer is not None:
97
+ self.file.write(self.buffer)
98
+ self.buffer = None
99
+
100
+ def write(self, data):
101
+ if self.file is not None:
102
+ self.file.write(data)
103
+ if self.buffer is not None:
104
+ self.buffer += data
105
+
106
+ def flush(self):
107
+ if self.file is not None:
108
+ self.file.flush()
109
+
110
+ class TeeOutputStream(object):
111
+ def __init__(self, child_streams, autoflush=False):
112
+ self.child_streams = child_streams
113
+ self.autoflush = autoflush
114
+
115
+ def write(self, data):
116
+ for stream in self.child_streams:
117
+ stream.write(data)
118
+ if self.autoflush:
119
+ self.flush()
120
+
121
+ def flush(self):
122
+ for stream in self.child_streams:
123
+ stream.flush()
124
+
125
+ output_logger = None
126
+
127
+ def init_output_logging():
128
+ global output_logger
129
+ if output_logger is None:
130
+ output_logger = OutputLogger()
131
+ sys.stdout = TeeOutputStream([sys.stdout, output_logger], autoflush=True)
132
+ sys.stderr = TeeOutputStream([sys.stderr, output_logger], autoflush=True)
133
+
134
+ def set_output_log_file(filename, mode='wt'):
135
+ if output_logger is not None:
136
+ output_logger.set_log_file(filename, mode)
137
+
138
+ #----------------------------------------------------------------------------
139
+ # Reporting results.
140
+
141
+ def create_result_subdir(result_dir, desc):
142
+
143
+ # Select run ID and create subdir.
144
+ while True:
145
+ run_id = 0
146
+ for fname in glob.glob(os.path.join(result_dir, '*')):
147
+ try:
148
+ fbase = os.path.basename(fname)
149
+ ford = int(fbase[:fbase.find('-')])
150
+ run_id = max(run_id, ford + 1)
151
+ except ValueError:
152
+ pass
153
+
154
+ result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, desc))
155
+ try:
156
+ os.makedirs(result_subdir)
157
+ break
158
+ except OSError:
159
+ if os.path.isdir(result_subdir):
160
+ continue
161
+ raise
162
+
163
+ print("Saving results to", result_subdir)
164
+ set_output_log_file(os.path.join(result_subdir, 'log.txt'))
165
+
166
+ # Export config.
167
+ try:
168
+ with open(os.path.join(result_subdir, 'config.txt'), 'wt') as fout:
169
+ for k, v in sorted(config.__dict__.items()):
170
+ if not k.startswith('_'):
171
+ fout.write("%s = %s\n" % (k, str(v)))
172
+ except:
173
+ pass
174
+
175
+ return result_subdir
176
+
177
+ def format_time(seconds):
178
+ s = int(np.rint(seconds))
179
+ if s < 60: return '%ds' % (s)
180
+ elif s < 60*60: return '%dm %02ds' % (s // 60, s % 60)
181
+ elif s < 24*60*60: return '%dh %02dm %02ds' % (s // (60*60), (s // 60) % 60, s % 60)
182
+ else: return '%dd %02dh %02dm' % (s // (24*60*60), (s // (60*60)) % 24, (s // 60) % 60)
183
+
184
+ #----------------------------------------------------------------------------
185
+ # Locating results.
186
+
187
+ def locate_result_subdir(run_id_or_result_subdir):
188
+ if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir):
189
+ return run_id_or_result_subdir
190
+
191
+ searchdirs = []
192
+ searchdirs += ['']
193
+ searchdirs += ['results']
194
+ searchdirs += ['networks']
195
+
196
+ for searchdir in searchdirs:
197
+ dir = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir)
198
+ dir = os.path.join(dir, str(run_id_or_result_subdir))
199
+ if os.path.isdir(dir):
200
+ return dir
201
+ prefix = '%03d' % run_id_or_result_subdir if isinstance(run_id_or_result_subdir, int) else str(run_id_or_result_subdir)
202
+ dirs = sorted(glob.glob(os.path.join(config.result_dir, searchdir, prefix + '-*')))
203
+ dirs = [dir for dir in dirs if os.path.isdir(dir)]
204
+ if len(dirs) == 1:
205
+ return dirs[0]
206
+ raise IOError('Cannot locate result subdir for run', run_id_or_result_subdir)
207
+
208
+ def list_network_pkls(run_id_or_result_subdir, include_final=True):
209
+ result_subdir = locate_result_subdir(run_id_or_result_subdir)
210
+ pkls = sorted(glob.glob(os.path.join(result_subdir, 'network-*.pkl')))
211
+ if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl':
212
+ if include_final:
213
+ pkls.append(pkls[0])
214
+ del pkls[0]
215
+ return pkls
216
+
217
+ def locate_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot=None):
218
+ if isinstance(run_id_or_result_subdir_or_network_pkl, str) and os.path.isfile(run_id_or_result_subdir_or_network_pkl):
219
+ return run_id_or_result_subdir_or_network_pkl
220
+
221
+ pkls = list_network_pkls(run_id_or_result_subdir_or_network_pkl)
222
+ if len(pkls) >= 1 and snapshot is None:
223
+ return pkls[-1]
224
+ for pkl in pkls:
225
+ try:
226
+ name = os.path.splitext(os.path.basename(pkl))[0]
227
+ number = int(name.split('-')[-1])
228
+ if number == snapshot:
229
+ return pkl
230
+ except ValueError: pass
231
+ except IndexError: pass
232
+ raise IOError('Cannot locate network pkl for snapshot', snapshot)
233
+
234
+ def get_id_string_for_network_pkl(network_pkl):
235
+ p = network_pkl.replace('.pkl', '').replace('\\', '/').split('/')
236
+ return '-'.join(p[max(len(p) - 2, 0):])
237
+
238
+ #----------------------------------------------------------------------------
239
+ # Loading and using trained networks.
240
+
241
+ def load_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot=None):
242
+ return load_pkl(locate_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot))
243
+
244
+ def random_latents(num_latents, G, random_state=None):
245
+ if random_state is not None:
246
+ return random_state.randn(num_latents, *G.input_shape[1:]).astype(np.float32)
247
+ else:
248
+ return np.random.randn(num_latents, *G.input_shape[1:]).astype(np.float32)
249
+
250
+ def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment
251
+ result_subdir = locate_result_subdir(run_id)
252
+
253
+ # Parse config.txt.
254
+ parsed_cfg = dict()
255
+ with open(os.path.join(result_subdir, 'config.txt'), 'rt') as f:
256
+ for line in f:
257
+ if line.startswith('dataset =') or line.startswith('train ='):
258
+ exec(line, parsed_cfg, parsed_cfg)
259
+ dataset_cfg = parsed_cfg.get('dataset', dict())
260
+ train_cfg = parsed_cfg.get('train', dict())
261
+ mirror_augment = train_cfg.get('mirror_augment', False)
262
+
263
+ # Handle legacy options.
264
+ if 'h5_path' in dataset_cfg:
265
+ dataset_cfg['tfrecord_dir'] = dataset_cfg.pop('h5_path').replace('.h5', '')
266
+ if 'mirror_augment' in dataset_cfg:
267
+ mirror_augment = dataset_cfg.pop('mirror_augment')
268
+ if 'max_labels' in dataset_cfg:
269
+ v = dataset_cfg.pop('max_labels')
270
+ if v is None: v = 0
271
+ if v == 'all': v = 'full'
272
+ dataset_cfg['max_label_size'] = v
273
+ if 'max_images' in dataset_cfg:
274
+ dataset_cfg.pop('max_images')
275
+
276
+ # Handle legacy dataset names.
277
+ v = dataset_cfg['tfrecord_dir']
278
+ v = v.replace('-32x32', '').replace('-32', '')
279
+ v = v.replace('-128x128', '').replace('-128', '')
280
+ v = v.replace('-256x256', '').replace('-256', '')
281
+ v = v.replace('-1024x1024', '').replace('-1024', '')
282
+ v = v.replace('celeba-hq', 'celebahq')
283
+ v = v.replace('cifar-10', 'cifar10')
284
+ v = v.replace('cifar-100', 'cifar100')
285
+ v = v.replace('mnist-rgb', 'mnistrgb')
286
+ v = re.sub('lsun-100k-([^-]*)', 'lsun-\\1-100k', v)
287
+ v = re.sub('lsun-full-([^-]*)', 'lsun-\\1-full', v)
288
+ dataset_cfg['tfrecord_dir'] = v
289
+
290
+ # Load dataset.
291
+ dataset_cfg.update(kwargs)
292
+ dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **dataset_cfg)
293
+ return dataset_obj, mirror_augment
294
+
295
+ def apply_mirror_augment(minibatch):
296
+ mask = np.random.rand(minibatch.shape[0]) < 0.5
297
+ minibatch = np.array(minibatch)
298
+ minibatch[mask] = minibatch[mask, :, :, ::-1]
299
+ return minibatch
300
+
301
+ #----------------------------------------------------------------------------
302
+ # Text labels.
303
+
304
+ _text_label_cache = OrderedDict()
305
+
306
+ def draw_text_label(img, text, x, y, alignx=0.5, aligny=0.5, color=255, opacity=1.0, glow_opacity=1.0, **kwargs):
307
+ color = np.array(color).flatten().astype(np.float32)
308
+ assert img.ndim == 3 and img.shape[2] == color.size or color.size == 1
309
+ alpha, glow = setup_text_label(text, **kwargs)
310
+ xx, yy = int(np.rint(x - alpha.shape[1] * alignx)), int(np.rint(y - alpha.shape[0] * aligny))
311
+ xb, yb = max(-xx, 0), max(-yy, 0)
312
+ xe, ye = min(alpha.shape[1], img.shape[1] - xx), min(alpha.shape[0], img.shape[0] - yy)
313
+ img = np.array(img)
314
+ slice = img[yy+yb : yy+ye, xx+xb : xx+xe, :]
315
+ slice[:] = slice * (1.0 - (1.0 - (1.0 - alpha[yb:ye, xb:xe]) * (1.0 - glow[yb:ye, xb:xe] * glow_opacity)) * opacity)[:, :, np.newaxis]
316
+ slice[:] = slice + alpha[yb:ye, xb:xe, np.newaxis] * (color * opacity)[np.newaxis, np.newaxis, :]
317
+ return img
318
+
319
+ def setup_text_label(text, font='Calibri', fontsize=32, padding=6, glow_size=2.0, glow_coef=3.0, glow_exp=2.0, cache_size=100): # => (alpha, glow)
320
+ # Lookup from cache.
321
+ key = (text, font, fontsize, padding, glow_size, glow_coef, glow_exp)
322
+ if key in _text_label_cache:
323
+ value = _text_label_cache[key]
324
+ del _text_label_cache[key] # LRU policy
325
+ _text_label_cache[key] = value
326
+ return value
327
+
328
+ # Limit cache size.
329
+ while len(_text_label_cache) >= cache_size:
330
+ _text_label_cache.popitem(last=False)
331
+
332
+ # Render text.
333
+ import moviepy.editor # pip install moviepy
334
+ alpha = moviepy.editor.TextClip(text, font=font, fontsize=fontsize).mask.make_frame(0)
335
+ alpha = np.pad(alpha, padding, mode='constant', constant_values=0.0)
336
+ glow = scipy.ndimage.gaussian_filter(alpha, glow_size)
337
+ glow = 1.0 - np.maximum(1.0 - glow * glow_coef, 0.0) ** glow_exp
338
+
339
+ # Add to cache.
340
+ value = (alpha, glow)
341
+ _text_label_cache[key] = value
342
+ return value
343
+
344
+ #----------------------------------------------------------------------------
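Of the helpers above, adjust_dynamic_range is the one most downstream code leans on: a plain affine remap from one value range to another. A quick illustration, assuming the function is in scope:

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
# Map generator output in [-1, 1] to displayable [0, 255]:
# scale = 255 / 2 = 127.5, bias = 127.5.
print(adjust_dynamic_range(x, drange_in=[-1, 1], drange_out=[0, 255]))
# -> [  0.   127.5  255. ]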
models/pggan_tf_official/networks.py ADDED
@@ -0,0 +1,315 @@
1
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
4
+ # 4.0 International License. To view a copy of this license, visit
5
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7
+
8
+ import numpy as np
9
+ import tensorflow as tf
10
+
11
+ # NOTE: Do not import any application-specific modules here!
12
+
13
+ #----------------------------------------------------------------------------
14
+
15
+ def lerp(a, b, t): return a + (b - a) * t
16
+ def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
17
+ def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
18
+
19
+ #----------------------------------------------------------------------------
20
+ # Get/create weight tensor for a convolutional or fully-connected layer.
21
+
22
+ def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
23
+ if fan_in is None: fan_in = np.prod(shape[:-1])
24
+ std = gain / np.sqrt(fan_in) # He init
25
+ if use_wscale:
26
+ wscale = tf.constant(np.float32(std), name='wscale')
27
+ return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
28
+ else:
29
+ return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))
30
+
31
+ #----------------------------------------------------------------------------
32
+ # Fully-connected layer.
33
+
34
+ def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
35
+ if len(x.shape) > 2:
36
+ x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
37
+ w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
38
+ w = tf.cast(w, x.dtype)
39
+ return tf.matmul(x, w)
40
+
41
+ #----------------------------------------------------------------------------
42
+ # Convolutional layer.
43
+
44
+ def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
45
+ assert kernel >= 1 and kernel % 2 == 1
46
+ w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
47
+ w = tf.cast(w, x.dtype)
48
+ return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW')
49
+
50
+ #----------------------------------------------------------------------------
51
+ # Apply bias to the given activation tensor.
52
+
53
+ def apply_bias(x):
54
+ b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros())
55
+ b = tf.cast(b, x.dtype)
56
+ if len(x.shape) == 2:
57
+ return x + b
58
+ else:
59
+ return x + tf.reshape(b, [1, -1, 1, 1])
60
+
61
+ #----------------------------------------------------------------------------
62
+ # Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16.
63
+
64
+ def leaky_relu(x, alpha=0.2):
65
+ with tf.name_scope('LeakyRelu'):
66
+ alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
67
+ return tf.maximum(x * alpha, x)
68
+
69
+ #----------------------------------------------------------------------------
70
+ # Nearest-neighbor upscaling layer.
71
+
72
+ def upscale2d(x, factor=2):
73
+ assert isinstance(factor, int) and factor >= 1
74
+ if factor == 1: return x
75
+ with tf.variable_scope('Upscale2D'):
76
+ s = x.shape
77
+ x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
78
+ x = tf.tile(x, [1, 1, 1, factor, 1, factor])
79
+ x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
80
+ return x
81
+
82
+ #----------------------------------------------------------------------------
83
+ # Fused upscale2d + conv2d.
84
+ # Faster and uses less memory than performing the operations separately.
85
+
86
+ def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
87
+ assert kernel >= 1 and kernel % 2 == 1
88
+ w = get_weight([kernel, kernel, fmaps, x.shape[1].value], gain=gain, use_wscale=use_wscale, fan_in=(kernel**2)*x.shape[1].value)
89
+ w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
90
+ w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
91
+ w = tf.cast(w, x.dtype)
92
+ os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
93
+ return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
94
+
95
+ #----------------------------------------------------------------------------
96
+ # Box filter downscaling layer.
97
+
98
+ def downscale2d(x, factor=2):
99
+ assert isinstance(factor, int) and factor >= 1
100
+ if factor == 1: return x
101
+ with tf.variable_scope('Downscale2D'):
102
+ ksize = [1, 1, factor, factor]
103
+ return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') # NOTE: requires tf_config['graph_options.place_pruned_graph'] = True
104
+
105
+ #----------------------------------------------------------------------------
106
+ # Fused conv2d + downscale2d.
107
+ # Faster and uses less memory than performing the operations separately.
108
+
109
+ def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
110
+ assert kernel >= 1 and kernel % 2 == 1
111
+ w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
112
+ w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
113
+ w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
114
+ w = tf.cast(w, x.dtype)
115
+ return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
116
+
117
+ #----------------------------------------------------------------------------
118
+ # Pixelwise feature vector normalization.
119
+
120
+ def pixel_norm(x, epsilon=1e-8):
121
+ with tf.variable_scope('PixelNorm'):
122
+ return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
123
+
124
+ #----------------------------------------------------------------------------
125
+ # Minibatch standard deviation.
126
+
127
+ def minibatch_stddev_layer(x, group_size=4):
128
+ with tf.variable_scope('MinibatchStddev'):
129
+ group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
130
+ s = x.shape # [NCHW] Input shape.
131
+ y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) # [GMCHW] Split minibatch into M groups of size G.
132
+ y = tf.cast(y, tf.float32) # [GMCHW] Cast to FP32.
133
+ y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMCHW] Subtract mean over group.
134
+ y = tf.reduce_mean(tf.square(y), axis=0) # [MCHW] Calc variance over group.
135
+ y = tf.sqrt(y + 1e-8) # [MCHW] Calc stddev over group.
136
+ y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) # [M111] Take average over fmaps and pixels.
137
+ y = tf.cast(y, x.dtype) # [M111] Cast back to original data type.
138
+ y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [N1HW] Replicate over group and pixels.
139
+ return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
140
+
141
+ #----------------------------------------------------------------------------
142
+ # Generator network used in the paper.
143
+
144
+ def G_paper(
145
+ latents_in, # First input: Latent vectors [minibatch, latent_size].
146
+ labels_in, # Second input: Labels [minibatch, label_size].
147
+ num_channels = 1, # Number of output color channels. Overridden based on dataset.
148
+ resolution = 32, # Output resolution. Overridden based on dataset.
149
+ label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
150
+ fmap_base = 8192, # Overall multiplier for the number of feature maps.
151
+ fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
152
+ fmap_max = 512, # Maximum number of feature maps in any layer.
153
+ latent_size = None, # Dimensionality of the latent vectors. None = min(fmap_base, fmap_max).
154
+ normalize_latents = True, # Normalize latent vectors before feeding them to the network?
155
+ use_wscale = True, # Enable equalized learning rate?
156
+ use_pixelnorm = True, # Enable pixelwise feature vector normalization?
157
+ pixelnorm_epsilon = 1e-8, # Constant epsilon for pixelwise feature vector normalization.
158
+ use_leakyrelu = True, # True = leaky ReLU, False = ReLU.
159
+ dtype = 'float32', # Data type to use for activations and outputs.
160
+ fused_scale = True, # True = use fused upscale2d + conv2d, False = separate upscale2d layers.
161
+ structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically.
162
+ is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
163
+ **kwargs): # Ignore unrecognized keyword args.
164
+
165
+ resolution_log2 = int(np.log2(resolution))
166
+ assert resolution == 2**resolution_log2 and resolution >= 4
167
+ def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
168
+ def PN(x): return pixel_norm(x, epsilon=pixelnorm_epsilon) if use_pixelnorm else x
169
+ if latent_size is None: latent_size = nf(0)
170
+ if structure is None: structure = 'linear' if is_template_graph else 'recursive'
171
+ act = leaky_relu if use_leakyrelu else tf.nn.relu
172
+
173
+ latents_in.set_shape([None, latent_size])
174
+ labels_in.set_shape([None, label_size])
175
+ combo_in = tf.cast(tf.concat([latents_in, labels_in], axis=1), dtype)
176
+ lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
177
+
178
+ # Building blocks.
179
+ def block(x, res): # res = 2..resolution_log2
180
+ with tf.variable_scope('%dx%d' % (2**res, 2**res)):
181
+ if res == 2: # 4x4
182
+ if normalize_latents: x = pixel_norm(x, epsilon=pixelnorm_epsilon)
183
+ with tf.variable_scope('Dense'):
184
+ x = dense(x, fmaps=nf(res-1)*16, gain=np.sqrt(2)/4, use_wscale=use_wscale) # override gain to match the original Theano implementation
185
+                     x = tf.reshape(x, [-1, nf(res-1), 4, 4])
+                     x = PN(act(apply_bias(x)))
+                 with tf.variable_scope('Conv'):
+                     x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
+             else: # 8x8 and up
+                 if fused_scale:
+                     with tf.variable_scope('Conv0_up'):
+                         x = PN(act(apply_bias(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
+                 else:
+                     x = upscale2d(x)
+                     with tf.variable_scope('Conv0'):
+                         x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
+                 with tf.variable_scope('Conv1'):
+                     x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
+             return x
+     def torgb(x, res): # res = 2..resolution_log2
+         lod = resolution_log2 - res
+         with tf.variable_scope('ToRGB_lod%d' % lod):
+             return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))
+
+     # Linear structure: simple but inefficient.
+     if structure == 'linear':
+         x = block(combo_in, 2)
+         images_out = torgb(x, 2)
+         for res in range(3, resolution_log2 + 1):
+             lod = resolution_log2 - res
+             x = block(x, res)
+             img = torgb(x, res)
+             images_out = upscale2d(images_out)
+             with tf.variable_scope('Grow_lod%d' % lod):
+                 images_out = lerp_clip(img, images_out, lod_in - lod)
+
+     # Recursive structure: complex but efficient.
+     if structure == 'recursive':
+         def grow(x, res, lod):
+             y = block(x, res)
+             img = lambda: upscale2d(torgb(y, res), 2**lod)
+             if res > 2: img = cset(img, (lod_in > lod), lambda: upscale2d(lerp(torgb(y, res), upscale2d(torgb(x, res - 1)), lod_in - lod), 2**lod))
+             if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
+             return img()
+         images_out = grow(combo_in, 2, resolution_log2 - 2)
+
+     assert images_out.dtype == tf.as_dtype(dtype)
+     images_out = tf.identity(images_out, name='images_out')
+     return images_out
+
+ #----------------------------------------------------------------------------
+ # Discriminator network used in the paper.
+
+ def D_paper(
+     images_in,                          # Input: Images [minibatch, channel, height, width].
+     num_channels        = 1,            # Number of input color channels. Overridden based on dataset.
+     resolution          = 32,           # Input resolution. Overridden based on dataset.
+     label_size          = 0,            # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
+     fmap_base           = 8192,         # Overall multiplier for the number of feature maps.
+     fmap_decay          = 1.0,          # log2 feature map reduction when doubling the resolution.
+     fmap_max            = 512,          # Maximum number of feature maps in any layer.
+     use_wscale          = True,         # Enable equalized learning rate?
+     mbstd_group_size    = 4,            # Group size for the minibatch standard deviation layer, 0 = disable.
+     dtype               = 'float32',    # Data type to use for activations and outputs.
+     fused_scale         = True,         # True = use fused conv2d + downscale2d, False = separate downscale2d layers.
+     structure           = None,         # 'linear' = human-readable, 'recursive' = efficient, None = select automatically.
+     is_template_graph   = False,        # True = template graph constructed by the Network class, False = actual evaluation.
+     **kwargs):                          # Ignore unrecognized keyword args.
+
+     resolution_log2 = int(np.log2(resolution))
+     assert resolution == 2**resolution_log2 and resolution >= 4
+     def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
+     if structure is None: structure = 'linear' if is_template_graph else 'recursive'
+     act = leaky_relu
+
+     images_in.set_shape([None, num_channels, resolution, resolution])
+     images_in = tf.cast(images_in, dtype)
+     lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
+
+     # Building blocks.
+     def fromrgb(x, res): # res = 2..resolution_log2
+         with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
+             return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, use_wscale=use_wscale)))
+     def block(x, res): # res = 2..resolution_log2
+         with tf.variable_scope('%dx%d' % (2**res, 2**res)):
+             if res >= 3: # 8x8 and up
+                 with tf.variable_scope('Conv0'):
+                     x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
+                 if fused_scale:
+                     with tf.variable_scope('Conv1_down'):
+                         x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
+                 else:
+                     with tf.variable_scope('Conv1'):
+                         x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
+                     x = downscale2d(x)
+             else: # 4x4
+                 if mbstd_group_size > 1:
+                     x = minibatch_stddev_layer(x, mbstd_group_size)
+                 with tf.variable_scope('Conv'):
+                     x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
+                 with tf.variable_scope('Dense0'):
+                     x = act(apply_bias(dense(x, fmaps=nf(res-2), use_wscale=use_wscale)))
+                 with tf.variable_scope('Dense1'):
+                     x = apply_bias(dense(x, fmaps=1+label_size, gain=1, use_wscale=use_wscale))
+             return x
+
+     # Linear structure: simple but inefficient.
+     if structure == 'linear':
+         img = images_in
+         x = fromrgb(img, resolution_log2)
+         for res in range(resolution_log2, 2, -1):
+             lod = resolution_log2 - res
+             x = block(x, res)
+             img = downscale2d(img)
+             y = fromrgb(img, res - 1)
+             with tf.variable_scope('Grow_lod%d' % lod):
+                 x = lerp_clip(x, y, lod_in - lod)
+         combo_out = block(x, 2)
+
+     # Recursive structure: complex but efficient.
+     if structure == 'recursive':
+         def grow(res, lod):
+             x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
+             if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
+             x = block(x(), res); y = lambda: x
+             if res > 2: y = cset(y, (lod_in > lod), lambda: lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod))
+             return y()
+         combo_out = grow(2, resolution_log2 - 2)
+
+     assert combo_out.dtype == tf.as_dtype(dtype)
+     scores_out = tf.identity(combo_out[:, :1], name='scores_out')
+     labels_out = tf.identity(combo_out[:, 1:], name='labels_out')
+     return scores_out, labels_out
+
+ #----------------------------------------------------------------------------
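
For intuition, a small self-contained sketch (plain Python, hypothetical numbers, not part of the commit) of how the Grow_lod stages above use lerp_clip to fade a freshly added resolution in as lod_in sweeps past an integer lod:

def lerp_clip(a, b, t):
    # Blend a -> b with t clamped to [0, 1]; mirrors tfutil.lerp_clip below.
    return a + (b - a) * min(max(t, 0.0), 1.0)

# With lod_in - lod = 0.4, the output is 60% the new-resolution image and
# 40% the upscaled lower-resolution one: lerp_clip(new, old, 0.4).
assert lerp_clip(1.0, 0.0, 0.4) == 0.6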
models/pggan_tf_official/requirements-pip.txt ADDED
@@ -0,0 +1,10 @@
+ numpy>=1.13.3
+ scipy>=1.0.0
+ tensorflow-gpu>=1.6.0
+ moviepy>=0.2.3.2
+ Pillow>=3.1.1
+ lmdb>=0.93
+ opencv-python>=3.4.0.12
+ cryptography>=2.1.4
+ h5py>=2.7.1
+ six>=1.11.0
models/pggan_tf_official/tfutil.py ADDED
@@ -0,0 +1,749 @@
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
+ # 4.0 International License. To view a copy of this license, visit
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
+
+ import os
+ import sys
+ import inspect
+ import importlib
+ import imp
+ import numpy as np
+ from collections import OrderedDict
+ import tensorflow as tf
+
+ #----------------------------------------------------------------------------
+ # Convenience.
+
+ def run(*args, **kwargs): # Run the specified ops in the default session.
+     return tf.get_default_session().run(*args, **kwargs)
+
+ def is_tf_expression(x):
+     return isinstance(x, tf.Tensor) or isinstance(x, tf.Variable) or isinstance(x, tf.Operation)
+
+ def shape_to_list(shape):
+     return [dim.value for dim in shape]
+
+ def flatten(x):
+     with tf.name_scope('Flatten'):
+         return tf.reshape(x, [-1])
+
+ def log2(x):
+     with tf.name_scope('Log2'):
+         return tf.log(x) * np.float32(1.0 / np.log(2.0))
+
+ def exp2(x):
+     with tf.name_scope('Exp2'):
+         return tf.exp(x * np.float32(np.log(2.0)))
+
+ def lerp(a, b, t):
+     with tf.name_scope('Lerp'):
+         return a + (b - a) * t
+
+ def lerp_clip(a, b, t):
+     with tf.name_scope('LerpClip'):
+         return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
+
+ def absolute_name_scope(scope): # Forcefully enter the specified name scope, ignoring any surrounding scopes.
+     return tf.name_scope(scope + '/')
+
+ #----------------------------------------------------------------------------
+ # Initialize TensorFlow graph and session using good default settings.
+
+ def init_tf(config_dict=dict()):
+     if tf.get_default_session() is None:
+         tf.set_random_seed(np.random.randint(1 << 31))
+         create_session(config_dict, force_as_default=True)
+
+ #----------------------------------------------------------------------------
+ # Create tf.Session based on config dict of the form
+ # {'gpu_options.allow_growth': True}
+
+ def create_session(config_dict=dict(), force_as_default=False):
+     config = tf.ConfigProto()
+     for key, value in config_dict.items():
+         fields = key.split('.')
+         obj = config
+         for field in fields[:-1]:
+             obj = getattr(obj, field)
+         setattr(obj, fields[-1], value)
+     session = tf.Session(config=config)
+     if force_as_default:
+         session._default_session = session.as_default()
+         session._default_session.enforce_nesting = False
+         session._default_session.__enter__()
+     return session
+
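
A hedged usage sketch (not from the commit): the dotted keys of config_dict are resolved onto nested ConfigProto fields, so enabling GPU memory growth looks like:

# Equivalent to setting config.gpu_options.allow_growth = True.
session = create_session({'gpu_options.allow_growth': True}, force_as_default=True)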
+ #----------------------------------------------------------------------------
+ # Initialize all tf.Variables that have not already been initialized.
+ # Equivalent to the following, but more efficient and does not bloat the tf graph:
+ # tf.variables_initializer(tf.report_uninitialized_variables()).run()
+
+ def init_uninited_vars(vars=None):
+     if vars is None: vars = tf.global_variables()
+     test_vars = []; test_ops = []
+     with tf.control_dependencies(None): # ignore surrounding control_dependencies
+         for var in vars:
+             assert is_tf_expression(var)
+             try:
+                 tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0'))
+             except KeyError:
+                 # Op does not exist => variable may be uninitialized.
+                 test_vars.append(var)
+                 with absolute_name_scope(var.name.split(':')[0]):
+                     test_ops.append(tf.is_variable_initialized(var))
+     init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
+     run([var.initializer for var in init_vars])
+
+ #----------------------------------------------------------------------------
+ # Set the values of given tf.Variables.
+ # Equivalent to the following, but more efficient and does not bloat the tf graph:
+ # tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
+
+ def set_vars(var_to_value_dict):
+     ops = []
+     feed_dict = {}
+     for var, value in var_to_value_dict.items():
+         assert is_tf_expression(var)
+         try:
+             setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/setter:0')) # look for existing op
+         except KeyError:
+             with absolute_name_scope(var.name.split(':')[0]):
+                 with tf.control_dependencies(None): # ignore surrounding control_dependencies
+                     setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter') # create new setter
+         ops.append(setter)
+         feed_dict[setter.op.inputs[1]] = value
+     run(ops, feed_dict)
+
+ #----------------------------------------------------------------------------
+ # Autosummary creates an identity op that internally keeps track of the input
+ # values and automatically shows up in TensorBoard. The reported value
+ # represents an average over input components. The average is accumulated
+ # constantly over time and flushed when save_summaries() is called.
+ #
+ # Notes:
+ # - The output tensor must be used as an input for something else in the
+ #   graph. Otherwise, the autosummary op will not get executed, and the average
+ #   value will not get accumulated.
+ # - It is perfectly fine to include autosummaries with the same name in
+ #   several places throughout the graph, even if they are executed concurrently.
+ # - It is ok to also pass in a python scalar or numpy array. In this case, it
+ #   is added to the average immediately.
+
+ _autosummary_vars = OrderedDict() # name => [var, ...]
+ _autosummary_immediate = OrderedDict() # name => update_op, update_value
+ _autosummary_finalized = False
+
+ def autosummary(name, value):
+     id = name.replace('/', '_')
+     if is_tf_expression(value):
+         with tf.name_scope('summary_' + id), tf.device(value.device):
+             update_op = _create_autosummary_var(name, value)
+             with tf.control_dependencies([update_op]):
+                 return tf.identity(value)
+     else: # python scalar or numpy array
+         if name not in _autosummary_immediate:
+             with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
+                 update_value = tf.placeholder(tf.float32)
+                 update_op = _create_autosummary_var(name, update_value)
+                 _autosummary_immediate[name] = update_op, update_value
+         update_op, update_value = _autosummary_immediate[name]
+         run(update_op, {update_value: np.float32(value)})
+         return value
+
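
A hedged sketch of both usage modes described in the notes above (the loss tensor is hypothetical):

# Graph mode: wrap a tensor; the returned identity must feed the rest of the graph.
loss = autosummary('Loss/G', loss)

# Immediate mode: a plain Python scalar is accumulated right away.
autosummary('Timing/sec_per_tick', 12.3)
# Accumulated averages are flushed to TensorBoard by save_summaries(), defined below.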
+ # Create the necessary ops to include autosummaries in TensorBoard report.
+ # Note: This should be done only once per graph.
+ def finalize_autosummaries():
+     global _autosummary_finalized
+     if _autosummary_finalized:
+         return
+     _autosummary_finalized = True
+     init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])
+     with tf.device(None), tf.control_dependencies(None):
+         for name, vars in _autosummary_vars.items():
+             id = name.replace('/', '_')
+             with absolute_name_scope('Autosummary/' + id):
+                 sum = tf.add_n(vars)
+                 avg = sum[0] / sum[1]
+                 with tf.control_dependencies([avg]): # read before resetting
+                     reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]
+                     with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
+                         tf.summary.scalar(name, avg)
+
+ # Internal helper for creating autosummary accumulators.
+ def _create_autosummary_var(name, value_expr):
+     assert not _autosummary_finalized
+     v = tf.cast(value_expr, tf.float32)
+     if v.shape.ndims == 0:
+         v = [v, np.float32(1.0)]
+     elif v.shape.ndims == 1:
+         v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
+     else:
+         v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
+     v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
+     with tf.control_dependencies(None):
+         var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
+         update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
+     if name in _autosummary_vars:
+         _autosummary_vars[name].append(var)
+     else:
+         _autosummary_vars[name] = [var]
+     return update_op
+
+ #----------------------------------------------------------------------------
+ # Call filewriter.add_summary() with all summaries in the default graph,
+ # automatically finalizing and merging them on the first call.
+
+ _summary_merge_op = None
+
+ def save_summaries(filewriter, global_step=None):
+     global _summary_merge_op
+     if _summary_merge_op is None:
+         finalize_autosummaries()
+         with tf.device(None), tf.control_dependencies(None):
+             _summary_merge_op = tf.summary.merge_all()
+     filewriter.add_summary(_summary_merge_op.eval(), global_step)
+
+ #----------------------------------------------------------------------------
+ # Utilities for importing modules and objects by name.
+
+ def import_module(module_or_obj_name):
+     parts = module_or_obj_name.split('.')
+     parts[0] = {'np': 'numpy', 'tf': 'tensorflow'}.get(parts[0], parts[0])
+     for i in range(len(parts), 0, -1):
+         try:
+             module = importlib.import_module('.'.join(parts[:i]))
+             relative_obj_name = '.'.join(parts[i:])
+             return module, relative_obj_name
+         except ImportError:
+             pass
+     raise ImportError(module_or_obj_name)
+
+ def find_obj_in_module(module, relative_obj_name):
+     obj = module
+     for part in relative_obj_name.split('.'):
+         obj = getattr(obj, part)
+     return obj
+
+ def import_obj(obj_name):
+     module, relative_obj_name = import_module(obj_name)
+     return find_obj_in_module(module, relative_obj_name)
+
+ def call_func_by_name(*args, func=None, **kwargs):
+     assert func is not None
+     return import_obj(func)(*args, **kwargs)
+
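
A hedged example of the name-based import helpers; the 'np' shorthand is expanded by the mapping inside import_module():

mean_fn = import_obj('np.mean')                        # resolves to numpy.mean
result = call_func_by_name([1, 2, 3], func='np.mean')  # returns 2.0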
+ #----------------------------------------------------------------------------
+ # Wrapper for tf.train.Optimizer that automatically takes care of:
+ # - Gradient averaging for multi-GPU training.
+ # - Dynamic loss scaling and typecasts for FP16 training.
+ # - Ignoring corrupted gradients that contain NaNs/Infs.
+ # - Reporting statistics.
+ # - Well-chosen default settings.
+
+ class Optimizer:
+     def __init__(
+         self,
+         name                = 'Train',
+         tf_optimizer        = 'tf.train.AdamOptimizer',
+         learning_rate       = 0.001,
+         use_loss_scaling    = False,
+         loss_scaling_init   = 64.0,
+         loss_scaling_inc    = 0.0005,
+         loss_scaling_dec    = 1.0,
+         **kwargs):
+
+         # Init fields.
+         self.name               = name
+         self.learning_rate      = tf.convert_to_tensor(learning_rate)
+         self.id                 = self.name.replace('/', '.')
+         self.scope              = tf.get_default_graph().unique_name(self.id)
+         self.optimizer_class    = import_obj(tf_optimizer)
+         self.optimizer_kwargs   = dict(kwargs)
+         self.use_loss_scaling   = use_loss_scaling
+         self.loss_scaling_init  = loss_scaling_init
+         self.loss_scaling_inc   = loss_scaling_inc
+         self.loss_scaling_dec   = loss_scaling_dec
+         self._grad_shapes       = None          # [shape, ...]
+         self._dev_opt           = OrderedDict() # device => optimizer
+         self._dev_grads         = OrderedDict() # device => [[(grad, var), ...], ...]
+         self._dev_ls_var        = OrderedDict() # device => variable (log2 of loss scaling factor)
+         self._updates_applied   = False
+
+     # Register the gradients of the given loss function with respect to the given variables.
+     # Intended to be called once per GPU.
+     def register_gradients(self, loss, vars):
+         assert not self._updates_applied
+
+         # Validate arguments.
+         if isinstance(vars, dict):
+             vars = list(vars.values()) # allow passing in Network.trainables as vars
+         assert isinstance(vars, list) and len(vars) >= 1
+         assert all(is_tf_expression(expr) for expr in vars + [loss])
+         if self._grad_shapes is None:
+             self._grad_shapes = [shape_to_list(var.shape) for var in vars]
+         assert len(vars) == len(self._grad_shapes)
+         assert all(shape_to_list(var.shape) == var_shape for var, var_shape in zip(vars, self._grad_shapes))
+         dev = loss.device
+         assert all(var.device == dev for var in vars)
+
+         # Register device and compute gradients.
+         with tf.name_scope(self.id + '_grad'), tf.device(dev):
+             if dev not in self._dev_opt:
+                 opt_name = self.scope.replace('/', '_') + '_opt%d' % len(self._dev_opt)
+                 self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
+                 self._dev_grads[dev] = []
+             loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
+             grads = self._dev_opt[dev].compute_gradients(loss, vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage
+             grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros
+             self._dev_grads[dev].append(grads)
+
+     # Construct training op to update the registered variables based on their gradients.
+     def apply_updates(self):
+         assert not self._updates_applied
+         self._updates_applied = True
+         devices = list(self._dev_grads.keys())
+         total_grads = sum(len(grads) for grads in self._dev_grads.values())
+         assert len(devices) >= 1 and total_grads >= 1
+         ops = []
+         with absolute_name_scope(self.scope):
+
+             # Cast gradients to FP32 and calculate partial sum within each device.
+             dev_grads = OrderedDict() # device => [(grad, var), ...]
+             for dev_idx, dev in enumerate(devices):
+                 with tf.name_scope('ProcessGrads%d' % dev_idx), tf.device(dev):
+                     sums = []
+                     for gv in zip(*self._dev_grads[dev]):
+                         assert all(v is gv[0][1] for g, v in gv)
+                         g = [tf.cast(g, tf.float32) for g, v in gv]
+                         g = g[0] if len(g) == 1 else tf.add_n(g)
+                         sums.append((g, gv[0][1]))
+                     dev_grads[dev] = sums
+
+             # Sum gradients across devices.
+             if len(devices) > 1:
+                 with tf.name_scope('SumAcrossGPUs'), tf.device(None):
+                     for var_idx, grad_shape in enumerate(self._grad_shapes):
+                         g = [dev_grads[dev][var_idx][0] for dev in devices]
+                         if np.prod(grad_shape): # nccl does not support zero-sized tensors
+                             g = tf.contrib.nccl.all_sum(g)
+                         for dev, gg in zip(devices, g):
+                             dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
+
+             # Apply updates separately on each device.
+             for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
+                 with tf.name_scope('ApplyGrads%d' % dev_idx), tf.device(dev):
+
+                     # Scale gradients as needed.
+                     if self.use_loss_scaling or total_grads > 1:
+                         with tf.name_scope('Scale'):
+                             coef = tf.constant(np.float32(1.0 / total_grads), name='coef')
+                             coef = self.undo_loss_scaling(coef)
+                             grads = [(g * coef, v) for g, v in grads]
+
+                     # Check for overflows.
+                     with tf.name_scope('CheckOverflow'):
+                         grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
+
+                     # Update weights and adjust loss scaling.
+                     with tf.name_scope('UpdateWeights'):
+                         opt = self._dev_opt[dev]
+                         ls_var = self.get_loss_scaling_var(dev)
+                         if not self.use_loss_scaling:
+                             ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
+                         else:
+                             ops.append(tf.cond(grad_ok,
+                                 lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
+                                 lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
+
+                     # Report statistics on the last device.
+                     if dev == devices[-1]:
+                         with tf.name_scope('Statistics'):
+                             ops.append(autosummary(self.id + '/learning_rate', self.learning_rate))
+                             ops.append(autosummary(self.id + '/overflow_frequency', tf.where(grad_ok, 0, 1)))
+                             if self.use_loss_scaling:
+                                 ops.append(autosummary(self.id + '/loss_scaling_log2', ls_var))
+
+             # Initialize variables and group everything into a single op.
+             self.reset_optimizer_state()
+             init_uninited_vars(list(self._dev_ls_var.values()))
+             return tf.group(*ops, name='TrainingOp')
+
+     # Reset internal state of the underlying optimizer.
+     def reset_optimizer_state(self):
+         run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
+
+     # Get or create variable representing log2 of the current dynamic loss scaling factor.
+     def get_loss_scaling_var(self, device):
+         if not self.use_loss_scaling:
+             return None
+         if device not in self._dev_ls_var:
+             with absolute_name_scope(self.scope + '/LossScalingVars'), tf.control_dependencies(None):
+                 self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name='loss_scaling_var')
+         return self._dev_ls_var[device]
+
+     # Apply dynamic loss scaling for the given expression.
+     def apply_loss_scaling(self, value):
+         assert is_tf_expression(value)
+         if not self.use_loss_scaling:
+             return value
+         return value * exp2(self.get_loss_scaling_var(value.device))
+
+     # Undo the effect of dynamic loss scaling for the given expression.
+     def undo_loss_scaling(self, value):
+         assert is_tf_expression(value)
+         if not self.use_loss_scaling:
+             return value
+         return value * exp2(-self.get_loss_scaling_var(value.device))
+
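
A minimal usage sketch of the wrapper, assuming a loss tensor and a net Network already exist (the same pattern train.py follows below):

opt = Optimizer(name='TrainG', learning_rate=0.001)
opt.register_gradients(tf.reduce_mean(loss), net.trainables)  # call once per GPU
train_op = opt.apply_updates()  # grouped op; silently skips NaN/Inf gradients
run(train_op)                   # execute one training step in the default session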
+ #----------------------------------------------------------------------------
+ # Generic network abstraction.
+ #
+ # Acts as a convenience wrapper for a parameterized network construction
+ # function, providing several utility methods and convenient access to
+ # the inputs/outputs/weights.
+ #
+ # Network objects can be safely pickled and unpickled for long-term
+ # archival purposes. The pickling works reliably as long as the underlying
+ # network construction function is defined in a standalone Python module
+ # that has no side effects or application-specific imports.
+
+ network_import_handlers = []    # Custom import handlers for dealing with legacy data in pickle import.
+ _network_import_modules = []    # Temporary modules created during pickle import.
+
+ class Network:
+     def __init__(self,
+         name=None,          # Network name. Used to select TensorFlow name and variable scopes.
+         func=None,          # Fully qualified name of the underlying network construction function.
+         **static_kwargs):   # Keyword arguments to be passed in to the network construction function.
+
+         self._init_fields()
+         self.name = name
+         self.static_kwargs = dict(static_kwargs)
+
+         # Init build func.
+         module, self._build_func_name = import_module(func)
+         self._build_module_src = inspect.getsource(module)
+         self._build_func = find_obj_in_module(module, self._build_func_name)
+
+         # Init graph.
+         self._init_graph()
+         self.reset_vars()
+
+     def _init_fields(self):
+         self.name               = None          # User-specified name, defaults to build func name if None.
+         self.scope              = None          # Unique TF graph scope, derived from the user-specified name.
+         self.static_kwargs      = dict()        # Arguments passed to the user-supplied build func.
+         self.num_inputs         = 0             # Number of input tensors.
+         self.num_outputs        = 0             # Number of output tensors.
+         self.input_shapes       = [[]]          # Input tensor shapes (NC or NCHW), including minibatch dimension.
+         self.output_shapes      = [[]]          # Output tensor shapes (NC or NCHW), including minibatch dimension.
+         self.input_shape        = []            # Short-hand for input_shapes[0].
+         self.output_shape       = []            # Short-hand for output_shapes[0].
+         self.input_templates    = []            # Input placeholders in the template graph.
+         self.output_templates   = []            # Output tensors in the template graph.
+         self.input_names        = []            # Name string for each input.
+         self.output_names       = []            # Name string for each output.
+         self.vars               = OrderedDict() # All variables (localname => var).
+         self.trainables         = OrderedDict() # Trainable variables (localname => var).
+         self._build_func        = None          # User-supplied build function that constructs the network.
+         self._build_func_name   = None          # Name of the build function.
+         self._build_module_src  = None          # Full source code of the module containing the build function.
+         self._run_cache         = dict()        # Cached graph data for Network.run().
+
+     def _init_graph(self):
+         # Collect inputs.
+         self.input_names = []
+         for param in inspect.signature(self._build_func).parameters.values():
+             if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
+                 self.input_names.append(param.name)
+         self.num_inputs = len(self.input_names)
+         assert self.num_inputs >= 1
+
+         # Choose name and scope.
+         if self.name is None:
+             self.name = self._build_func_name
+         self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)
+
+         # Build template graph.
+         with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
+             assert tf.get_variable_scope().name == self.scope
+             with absolute_name_scope(self.scope): # ignore surrounding name_scope
+                 with tf.control_dependencies(None): # ignore surrounding control_dependencies
+                     self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
+                     out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)
+
+         # Collect outputs.
+         assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
+         self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
+         self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
+         self.num_outputs = len(self.output_templates)
+         assert self.num_outputs >= 1
+
+         # Populate remaining fields.
+         self.input_shapes   = [shape_to_list(t.shape) for t in self.input_templates]
+         self.output_shapes  = [shape_to_list(t.shape) for t in self.output_templates]
+         self.input_shape    = self.input_shapes[0]
+         self.output_shape   = self.output_shapes[0]
+         self.vars           = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
+         self.trainables     = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])
+
+     # Run initializers for all variables defined by this network.
+     def reset_vars(self):
+         run([var.initializer for var in self.vars.values()])
+
+     # Run initializers for all trainable variables defined by this network.
+     def reset_trainables(self):
+         run([var.initializer for var in self.trainables.values()])
+
+     # Get TensorFlow expression(s) for the output(s) of this network, given the inputs.
+     def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs):
+         assert len(in_expr) == self.num_inputs
+         all_kwargs = dict(self.static_kwargs)
+         all_kwargs.update(dynamic_kwargs)
+         with tf.variable_scope(self.scope, reuse=True):
+             assert tf.get_variable_scope().name == self.scope
+             named_inputs = [tf.identity(expr, name=name) for expr, name in zip(in_expr, self.input_names)]
+             out_expr = self._build_func(*named_inputs, **all_kwargs)
+         assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
+         if return_as_list:
+             out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
+         return out_expr
+
+     # Get the local name of a given variable, excluding any surrounding name scopes.
+     def get_var_localname(self, var_or_globalname):
+         assert is_tf_expression(var_or_globalname) or isinstance(var_or_globalname, str)
+         globalname = var_or_globalname if isinstance(var_or_globalname, str) else var_or_globalname.name
+         assert globalname.startswith(self.scope + '/')
+         localname = globalname[len(self.scope) + 1:]
+         localname = localname.split(':')[0]
+         return localname
+
+     # Find variable by local or global name.
+     def find_var(self, var_or_localname):
+         assert is_tf_expression(var_or_localname) or isinstance(var_or_localname, str)
+         return self.vars[var_or_localname] if isinstance(var_or_localname, str) else var_or_localname
+
+     # Get the value of a given variable as NumPy array.
+     # Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible.
+     def get_var(self, var_or_localname):
+         return self.find_var(var_or_localname).eval()
+
+     # Set the value of a given variable based on the given NumPy array.
+     # Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible.
+     def set_var(self, var_or_localname, new_value):
+         return set_vars({self.find_var(var_or_localname): new_value})
+
+     # Pickle export.
+     def __getstate__(self):
+         return {
+             'version':          2,
+             'name':             self.name,
+             'static_kwargs':    self.static_kwargs,
+             'build_module_src': self._build_module_src,
+             'build_func_name':  self._build_func_name,
+             'variables':        list(zip(self.vars.keys(), run(list(self.vars.values()))))}
+
+     # Pickle import.
+     def __setstate__(self, state):
+         self._init_fields()
+
+         # Execute custom import handlers.
+         for handler in network_import_handlers:
+             state = handler(state)
+
+         # Set basic fields.
+         assert state['version'] == 2
+         self.name = state['name']
+         self.static_kwargs = state['static_kwargs']
+         self._build_module_src = state['build_module_src']
+         self._build_func_name = state['build_func_name']
+
+         # Parse imported module.
+         module = imp.new_module('_tfutil_network_import_module_%d' % len(_network_import_modules))
+         exec(self._build_module_src, module.__dict__)
+         self._build_func = find_obj_in_module(module, self._build_func_name)
+         _network_import_modules.append(module) # avoid gc
+
+         # Init graph.
+         self._init_graph()
+         self.reset_vars()
+         set_vars({self.find_var(name): value for name, value in state['variables']})
+
+     # Create a clone of this network with its own copy of the variables.
+     def clone(self, name=None):
+         net = object.__new__(Network)
+         net._init_fields()
+         net.name = name if name is not None else self.name
+         net.static_kwargs = dict(self.static_kwargs)
+         net._build_module_src = self._build_module_src
+         net._build_func_name = self._build_func_name
+         net._build_func = self._build_func
+         net._init_graph()
+         net.copy_vars_from(self)
+         return net
+
+     # Copy the values of all variables from the given network.
+     def copy_vars_from(self, src_net):
+         assert isinstance(src_net, Network)
+         name_to_value = run({name: src_net.find_var(name) for name in self.vars.keys()})
+         set_vars({self.find_var(name): value for name, value in name_to_value.items()})
+
+     # Copy the values of all trainable variables from the given network.
+     def copy_trainables_from(self, src_net):
+         assert isinstance(src_net, Network)
+         name_to_value = run({name: src_net.find_var(name) for name in self.trainables.keys()})
+         set_vars({self.find_var(name): value for name, value in name_to_value.items()})
+
+     # Create new network with the given parameters, and copy all variables from this network.
+     def convert(self, name=None, func=None, **static_kwargs):
+         net = Network(name, func, **static_kwargs)
+         net.copy_vars_from(self)
+         return net
+
+     # Construct a TensorFlow op that updates the variables of this network
+     # to be slightly closer to those of the given network.
+     def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):
+         assert isinstance(src_net, Network)
+         with absolute_name_scope(self.scope):
+             with tf.name_scope('MovingAvg'):
+                 ops = []
+                 for name, var in self.vars.items():
+                     if name in src_net.vars:
+                         cur_beta = beta if name in self.trainables else beta_nontrainable
+                         new_value = lerp(src_net.vars[name], var, cur_beta)
+                         ops.append(var.assign(new_value))
+                 return tf.group(*ops)
+
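
A hedged sketch of the moving-average pattern this enables, assuming a generator Network G already exists (train.py uses exactly this for Gs below):

Gs = G.clone('Gs')                                           # shadow copy with its own variables
Gs_update_op = Gs.setup_as_moving_average_of(G, beta=0.999)  # EMA of G's trainables
run(Gs_update_op)                                            # run once per training iteration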
+     # Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
+     def run(self, *in_arrays,
+         return_as_list  = False,    # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
+         print_progress  = False,    # Print progress to the console? Useful for very large input arrays.
+         minibatch_size  = None,     # Maximum minibatch size to use, None = disable batching.
+         num_gpus        = 1,        # Number of GPUs to use.
+         out_mul         = 1.0,      # Multiplicative constant to apply to the output(s).
+         out_add         = 0.0,      # Additive constant to apply to the output(s).
+         out_shrink      = 1,        # Shrink the spatial dimensions of the output(s) by the given factor.
+         out_dtype       = None,     # Convert the output to the specified data type.
+         **dynamic_kwargs):          # Additional keyword arguments to pass into the network construction function.
+
+         assert len(in_arrays) == self.num_inputs
+         num_items = in_arrays[0].shape[0]
+         if minibatch_size is None:
+             minibatch_size = num_items
+         key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])
+
+         # Build graph.
+         if key not in self._run_cache:
+             with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
+                 in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
+                 out_split = []
+                 for gpu in range(num_gpus):
+                     with tf.device('/gpu:%d' % gpu):
+                         out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
+                         if out_mul != 1.0:
+                             out_expr = [x * out_mul for x in out_expr]
+                         if out_add != 0.0:
+                             out_expr = [x + out_add for x in out_expr]
+                         if out_shrink > 1:
+                             ksize = [1, 1, out_shrink, out_shrink]
+                             out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr]
+                         if out_dtype is not None:
+                             if tf.as_dtype(out_dtype).is_integer:
+                                 out_expr = [tf.round(x) for x in out_expr]
+                             out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
+                         out_split.append(out_expr)
+                 self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
+
+         # Run minibatches.
+         out_expr = self._run_cache[key]
+         out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
+         for mb_begin in range(0, num_items, minibatch_size):
+             if print_progress:
+                 print('\r%d / %d' % (mb_begin, num_items), end='')
+             mb_end = min(mb_begin + minibatch_size, num_items)
+             mb_in = [src[mb_begin : mb_end] for src in in_arrays]
+             mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))
+             for dst, src in zip(out_arrays, mb_out):
+                 dst[mb_begin : mb_end] = src
+
+         # Done.
+         if print_progress:
+             print('\r%d / %d' % (num_items, num_items))
+         if not return_as_list:
+             out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
+         return out_arrays
+
+     # Returns a list of (name, output_expr, trainable_vars) tuples corresponding to
+     # individual layers of the network. Mainly intended to be used for reporting.
+     def list_layers(self):
+         patterns_to_ignore = ['/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast', '/concat']
+         all_ops = tf.get_default_graph().get_operations()
+         all_ops = [op for op in all_ops if not any(p in op.name for p in patterns_to_ignore)]
+         layers = []
+
+         def recurse(scope, parent_ops, level):
+             prefix = scope + '/'
+             ops = [op for op in parent_ops if op.name == scope or op.name.startswith(prefix)]
+
+             # Does not contain leaf nodes => expand immediate children.
+             if level == 0 or all('/' in op.name[len(prefix):] for op in ops):
+                 visited = set()
+                 for op in ops:
+                     suffix = op.name[len(prefix):]
+                     if '/' in suffix:
+                         suffix = suffix[:suffix.index('/')]
+                     if suffix not in visited:
+                         recurse(prefix + suffix, ops, level + 1)
+                         visited.add(suffix)
+
+             # Otherwise => interpret as a layer.
+             else:
+                 layer_name = scope[len(self.scope)+1:]
+                 layer_output = ops[-1].outputs[0]
+                 layer_trainables = [op.outputs[0] for op in ops if op.type.startswith('Variable') and self.get_var_localname(op.name) in self.trainables]
+                 layers.append((layer_name, layer_output, layer_trainables))
+
+         recurse(self.scope, all_ops, 0)
+         return layers
+
+     # Print a summary table of the network structure.
+     def print_layers(self, title=None, hide_layers_with_no_params=False):
+         if title is None: title = self.name
+         print()
+         print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape'))
+         print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
+
+         total_params = 0
+         for layer_name, layer_output, layer_trainables in self.list_layers():
+             weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]
+             num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables)
+             total_params += num_params
+             if hide_layers_with_no_params and num_params == 0:
+                 continue
+
+             print('%-28s%-12s%-24s%-24s' % (
+                 layer_name,
+                 num_params if num_params else '-',
+                 layer_output.shape,
+                 weights[0].shape if len(weights) == 1 else '-'))
+
+         print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
+         print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))
+         print()
+
+     # Construct summary ops to include histograms of all trainable parameters in TensorBoard.
+     def setup_weight_histograms(self, title=None):
+         if title is None: title = self.name
+         with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
+             for localname, var in self.trainables.items():
+                 if '/' in localname:
+                     p = localname.split('/')
+                     name = title + '_' + p[-1] + '/' + '_'.join(p[:-1])
+                 else:
+                     name = title + '_toplevel/' + localname
+                 tf.summary.histogram(name, var)
+
+ #----------------------------------------------------------------------------
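
A hedged end-to-end sketch of the Network wrapper; the build-function path 'networks.G_paper' and the keyword arguments are assumptions for illustration:

import numpy as np

G = Network('G', func='networks.G_paper', num_channels=3, resolution=128, label_size=0)
latents = np.random.randn(8, *G.input_shape[1:]).astype(np.float32)  # hypothetical latent batch
labels = np.zeros([8, 0], dtype=np.float32)
images = G.run(latents, labels, minibatch_size=4)  # NumPy in -> NumPy out, batched internally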
models/pggan_tf_official/train.py ADDED
@@ -0,0 +1,288 @@
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # This work is licensed under the Creative Commons Attribution-NonCommercial
+ # 4.0 International License. To view a copy of this license, visit
+ # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
+ # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
+
+ import os
+ import time
+ import numpy as np
+ import tensorflow as tf
+
+ import config
+ import tfutil
+ import dataset
+ import misc
+
+ #----------------------------------------------------------------------------
+ # Choose the size and contents of the image snapshot grids that are exported
+ # periodically during training.
+
+ def setup_snapshot_image_grid(G, training_set,
+     size    = '1080p',      # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
+     layout  = 'random'):    # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.
+
+     # Select size.
+     gw = 1; gh = 1
+     if size == '1080p':
+         gw = np.clip(1920 // G.output_shape[3], 3, 32)
+         gh = np.clip(1080 // G.output_shape[2], 2, 32)
+     if size == '4k':
+         gw = np.clip(3840 // G.output_shape[3], 7, 32)
+         gh = np.clip(2160 // G.output_shape[2], 4, 32)
+
+     # Fill in reals and labels.
+     reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
+     labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
+     for idx in range(gw * gh):
+         x = idx % gw; y = idx // gw
+         while True:
+             real, label = training_set.get_minibatch_np(1)
+             if layout == 'row_per_class' and training_set.label_size > 0:
+                 if label[0, y % training_set.label_size] == 0.0:
+                     continue
+             reals[idx] = real[0]
+             labels[idx] = label[0]
+             break
+
+     # Generate latents.
+     latents = misc.random_latents(gw * gh, G)
+     return (gw, gh), reals, labels, latents
+
+ #----------------------------------------------------------------------------
+ # Just-in-time processing of training images before feeding them to the networks.
+
+ def process_reals(x, lod, mirror_augment, drange_data, drange_net):
+     with tf.name_scope('ProcessReals'):
+         with tf.name_scope('DynamicRange'):
+             x = tf.cast(x, tf.float32)
+             x = misc.adjust_dynamic_range(x, drange_data, drange_net)
+         if mirror_augment:
+             with tf.name_scope('MirrorAugment'):
+                 s = tf.shape(x)
+                 mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
+                 mask = tf.tile(mask, [1, s[1], s[2], s[3]])
+                 x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
+         with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
+             s = tf.shape(x)
+             y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
+             y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
+             y = tf.tile(y, [1, 1, 1, 2, 1, 2])
+             y = tf.reshape(y, [-1, s[1], s[2], s[3]])
+             x = tfutil.lerp(x, y, lod - tf.floor(lod))
+         with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
+             s = tf.shape(x)
+             factor = tf.cast(2 ** tf.floor(lod), tf.int32)
+             x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
+             x = tf.tile(x, [1, 1, 1, factor, 1, factor])
+             x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
+         return x
+
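
A NumPy-only sketch of the FadeLOD crossfade above (assumed NCHW input, hypothetical values): a 2x2 box filter followed by nearest-neighbor upsampling, blended by the fractional part of lod:

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
y = x.reshape(1, 1, 2, 2, 2, 2).mean(axis=(3, 5), keepdims=True)  # 2x2 average pool
y = np.tile(y, (1, 1, 1, 2, 1, 2)).reshape(1, 1, 4, 4)            # nearest upsample
lod = 3.3
blend = x + (y - x) * (lod - np.floor(lod))                       # lerp(x, y, frac(lod))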
+ #----------------------------------------------------------------------------
+ # Class for evaluating and storing the values of time-varying training parameters.
+
+ class TrainingSchedule:
+     def __init__(
+         self,
+         cur_nimg,
+         training_set,
+         lod_initial_resolution  = 4,        # Image resolution used at the beginning.
+         lod_training_kimg       = 600,      # Thousands of real images to show before doubling the resolution.
+         lod_transition_kimg     = 600,      # Thousands of real images to show when fading in new layers.
+         minibatch_base          = 16,       # Maximum minibatch size, divided evenly among GPUs.
+         minibatch_dict          = {},       # Resolution-specific overrides.
+         max_minibatch_per_gpu   = {},       # Resolution-specific maximum minibatch size per GPU.
+         G_lrate_base            = 0.001,    # Learning rate for the generator.
+         G_lrate_dict            = {},       # Resolution-specific overrides.
+         D_lrate_base            = 0.001,    # Learning rate for the discriminator.
+         D_lrate_dict            = {},       # Resolution-specific overrides.
+         tick_kimg_base          = 160,      # Default interval of progress snapshots.
+         tick_kimg_dict          = {4: 160, 8: 140, 16: 120, 32: 100, 64: 80, 128: 60, 256: 40, 512: 20, 1024: 10}): # Resolution-specific overrides.
+
+         # Training phase.
+         self.kimg = cur_nimg / 1000.0
+         phase_dur = lod_training_kimg + lod_transition_kimg
+         phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0
+         phase_kimg = self.kimg - phase_idx * phase_dur
+
+         # Level-of-detail and resolution.
+         self.lod = training_set.resolution_log2
+         self.lod -= np.floor(np.log2(lod_initial_resolution))
+         self.lod -= phase_idx
+         if lod_transition_kimg > 0:
+             self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
+         self.lod = max(self.lod, 0.0)
+         self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))
+
+         # Minibatch size.
+         self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)
+         self.minibatch -= self.minibatch % config.num_gpus
+         if self.resolution in max_minibatch_per_gpu:
+             self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)
+
+         # Other parameters.
+         self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base)
+         self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base)
+         self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)
+
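
A worked example of the schedule arithmetic under the defaults above, assuming a 1024x1024 dataset (resolution_log2 = 10):

# At cur_nimg = 1_500_000: kimg = 1500, phase_dur = 600 + 600 = 1200,
# phase_idx = 1, phase_kimg = 300 < lod_training_kimg, so no transition term.
# lod = 10 - log2(4) - 1 = 7, hence the networks train at 2**(10 - 7) = 8x8.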
+ #----------------------------------------------------------------------------
+ # Main training script.
+ # To run, comment/uncomment appropriate lines in config.py and launch train.py.
+
+ def train_progressive_gan(
+     G_smoothing             = 0.999,        # Exponential running average of generator weights.
+     D_repeats               = 1,            # How many times the discriminator is trained per G iteration.
+     minibatch_repeats       = 4,            # Number of minibatches to run before adjusting training parameters.
+     reset_opt_for_new_lod   = True,         # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?
+     total_kimg              = 15000,        # Total length of the training, measured in thousands of real images.
+     mirror_augment          = False,        # Enable mirror augment?
+     drange_net              = [-1, 1],      # Dynamic range used when feeding image data to the networks.
+     image_snapshot_ticks    = 1,            # How often to export image snapshots?
+     network_snapshot_ticks  = 10,           # How often to export network snapshots?
+     save_tf_graph           = False,        # Include full TensorFlow computation graph in the tfevents file?
+     save_weight_histograms  = False,        # Include weight histograms in the tfevents file?
+     resume_run_id           = None,         # Run ID or network pkl to resume training from, None = start from scratch.
+     resume_snapshot         = None,         # Snapshot index to resume training from, None = autodetect.
+     resume_kimg             = 0.0,          # Assumed training progress at the beginning. Affects reporting and training schedule.
+     resume_time             = 0.0):         # Assumed wallclock time at the beginning. Affects reporting.
+
+     maintenance_start_time = time.time()
+     training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **config.dataset)
+
+     # Construct networks.
+     with tf.device('/gpu:0'):
+         if resume_run_id is not None:
+             network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot)
+             print('Loading networks from "%s"...' % network_pkl)
+             G, D, Gs = misc.load_pkl(network_pkl)
+         else:
+             print('Constructing networks...')
+             G = tfutil.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.G)
+             D = tfutil.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.D)
+             Gs = G.clone('Gs')
+         Gs_update_op = Gs.setup_as_moving_average_of(G, beta=G_smoothing)
+     G.print_layers(); D.print_layers()
+
+     print('Building TensorFlow graph...')
+     with tf.name_scope('Inputs'):
+         lod_in          = tf.placeholder(tf.float32, name='lod_in', shape=[])
+         lrate_in        = tf.placeholder(tf.float32, name='lrate_in', shape=[])
+         minibatch_in    = tf.placeholder(tf.int32, name='minibatch_in', shape=[])
+         minibatch_split = minibatch_in // config.num_gpus
+         reals, labels   = training_set.get_minibatch_tf()
+         reals_split     = tf.split(reals, config.num_gpus)
+         labels_split    = tf.split(labels, config.num_gpus)
+     G_opt = tfutil.Optimizer(name='TrainG', learning_rate=lrate_in, **config.G_opt)
+     D_opt = tfutil.Optimizer(name='TrainD', learning_rate=lrate_in, **config.D_opt)
+     for gpu in range(config.num_gpus):
+         with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):
+             G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow')
+             D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')
+             lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)]
+             reals_gpu = process_reals(reals_split[gpu], lod_in, mirror_augment, training_set.dynamic_range, drange_net)
+             labels_gpu = labels_split[gpu]
+             with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops):
+                 G_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **config.G_loss)
+             with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops):
+                 D_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals_gpu, labels=labels_gpu, **config.D_loss)
+             G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables)
+             D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables)
+     G_train_op = G_opt.apply_updates()
+     D_train_op = D_opt.apply_updates()
+
+     print('Setting up snapshot image grid...')
+     grid_size, grid_reals, grid_labels, grid_latents = setup_snapshot_image_grid(G, training_set, **config.grid)
+     sched = TrainingSchedule(total_kimg * 1000, training_set, **config.sched)
+     grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus)
+
+     print('Setting up result dir...')
+     result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
+     misc.save_image_grid(grid_reals, os.path.join(result_subdir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size)
+     misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % 0), drange=drange_net, grid_size=grid_size)
+     summary_log = tf.summary.FileWriter(result_subdir)
+     if save_tf_graph:
+         summary_log.add_graph(tf.get_default_graph())
+     if save_weight_histograms:
+         G.setup_weight_histograms(); D.setup_weight_histograms()
+
+     print('Training...')
+     cur_nimg = int(resume_kimg * 1000)
+     cur_tick = 0
+     tick_start_nimg = cur_nimg
+     tick_start_time = time.time()
+     train_start_time = tick_start_time - resume_time
+     prev_lod = -1.0
+     while cur_nimg < total_kimg * 1000:
+
+         # Choose training parameters and configure training ops.
+         sched = TrainingSchedule(cur_nimg, training_set, **config.sched)
+         training_set.configure(sched.minibatch, sched.lod)
+         if reset_opt_for_new_lod:
+             if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):
+                 G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state()
+         prev_lod = sched.lod
+
+         # Run training ops.
+         for repeat in range(minibatch_repeats):
+             for _ in range(D_repeats):
+                 tfutil.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch})
+                 cur_nimg += sched.minibatch
+             tfutil.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch})
+
+         # Perform maintenance tasks once per tick.
+         done = (cur_nimg >= total_kimg * 1000)
+         if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:
+             cur_tick += 1
+             cur_time = time.time()
+             tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
+             tick_start_nimg = cur_nimg
+             tick_time = cur_time - tick_start_time
+             total_time = cur_time - train_start_time
+             maintenance_time = tick_start_time - maintenance_start_time
+             maintenance_start_time = cur_time
+
+             # Report progress.
+             print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %.1f' % (
+                 tfutil.autosummary('Progress/tick', cur_tick),
+                 tfutil.autosummary('Progress/kimg', cur_nimg / 1000.0),
+                 tfutil.autosummary('Progress/lod', sched.lod),
+                 tfutil.autosummary('Progress/minibatch', sched.minibatch),
+                 misc.format_time(tfutil.autosummary('Timing/total_sec', total_time)),
+                 tfutil.autosummary('Timing/sec_per_tick', tick_time),
+                 tfutil.autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
+                 tfutil.autosummary('Timing/maintenance_sec', maintenance_time)))
+             tfutil.autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
+             tfutil.autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
+             tfutil.save_summaries(summary_log, cur_nimg)
+
+             # Save snapshots.
+             if cur_tick % image_snapshot_ticks == 0 or done:
+                 grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus)
+                 misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size)
+             if cur_tick % network_snapshot_ticks == 0 or done:
+                 misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000)))
+
+             # Record start time of the next tick.
+             tick_start_time = time.time()
+
+     # Write final results.
+     misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-final.pkl'))
+     summary_log.close()
+     open(os.path.join(result_subdir, '_training-done.txt'), 'wt').close()
+
+ #----------------------------------------------------------------------------
+ # Main entry point.
+ # Calls the function indicated in config.py.
+
+ if __name__ == "__main__":
+     misc.init_output_logging()
+     np.random.seed(config.random_seed)
+     print('Initializing TensorFlow...')
+     os.environ.update(config.env)
+     tfutil.init_tf(config.tf_config)
+     print('Running %s()...' % config.train['func'])
+     tfutil.call_func_by_name(**config.train)
+     print('Exiting...')
+
+ #----------------------------------------------------------------------------