diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f2a723b6f19a8f7f13f32c93a972c54a8c754ed6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.pkl
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..059359777dea654521b3ac46ceb2fa6b14c1195c
--- /dev/null
+++ b/app.py
@@ -0,0 +1,102 @@
+import os
+import sys
+
+import torch
+import cv2
+import PIL.Image
+import numpy as np
+import gradio as gr
+
+from models.stylegan_generator import StyleGANGenerator
+from models.stylegan2_generator import StyleGAN2Generator
+
+VALID_CHOICES = [
+    "Bald",
+    "Young",
+    "Mustache",
+    "Eyeglasses",
+    "Hat",
+    "Smiling"
+]
+ENABLE_GPU = False
+MODEL_NAMES = [
+    'stylegan_ffhq',
+    'stylegan2_ffhq'
+]
+NB_IMG = 4
+OUTPUT_LIST = [gr.outputs.Image(type="pil", label="Generated Image") for _ in range(NB_IMG)] + [gr.outputs.Image(type="pil", label="Modified Image") for _ in range(NB_IMG)]
+
+def tensor_to_pil(input_object):
+    """Converts generator outputs to a list of PIL images."""
+    im_array = []
+    if isinstance(input_object, dict):
+        images = input_object['image']
+    else:
+        images = input_object
+    for image in images:
+        im_array.append(PIL.Image.fromarray(image))
+    return im_array
+
+def get_generator(model_name):
+    if model_name == 'stylegan_ffhq':
+        generator = StyleGANGenerator(model_name)
+    elif model_name == 'stylegan2_ffhq':
+        generator = StyleGAN2Generator(model_name)
+    else:
+        raise ValueError('Model name not recognized')
+    if ENABLE_GPU:
+        generator = generator.cuda()
+    return generator
+
+
+def inference(seed, choice, model_name, coef, nb_images=NB_IMG):
+    np.random.seed(seed)
+
+    boundary = np.squeeze(np.load(open(os.path.join('boundaries', model_name, 'boundary_%s.npy' % choice), 'rb')))
+    generator = get_generator(model_name)
+    latent_codes = generator.easy_sample(nb_images)
+    if ENABLE_GPU:
+        latent_codes = latent_codes.cuda()
+        generator = generator.cuda()
+    generated_images = generator.easy_synthesize(latent_codes)
+    generated_images = tensor_to_pil(generated_images)
+
+    new_latent_codes = latent_codes.copy()
+    for i, _ in enumerate(generated_images):
+        new_latent_codes[i, :] += boundary*coef
+
+    modified_generated_images = generator.easy_synthesize(new_latent_codes)
+    modified_generated_images = tensor_to_pil(modified_generated_images)
+
+    return generated_images + modified_generated_images
+
+
+iface = gr.Interface(
+    fn=inference,
+    inputs=[
+        gr.inputs.Slider(
+            minimum=0,
+            maximum=1000,
+            step=1,
+            default=264,
+        ),
+        gr.inputs.Dropdown(
+            choices=VALID_CHOICES,
+            type="value",
+        ),
+        gr.inputs.Dropdown(
+            choices=MODEL_NAMES,
+            type="value",
+        ),
+        gr.inputs.Slider(
+            minimum=-3,
+            maximum=3,
+            step=0.1,
+            default=0,
+        ),
+    ],
+    outputs=OUTPUT_LIST,
+    layout="horizontal",
+    theme="peach"
+)
+iface.launch()
\ No newline at end of file
diff --git a/boundaries/stylegan2_ffhq/boundary_Bald.npy b/boundaries/stylegan2_ffhq/boundary_Bald.npy
new file mode 100644
index 0000000000000000000000000000000000000000..5329059a1860b78491621803f9a5fa8f18f32a89
Binary files /dev/null and b/boundaries/stylegan2_ffhq/boundary_Bald.npy differ
diff --git a/boundaries/stylegan2_ffhq/boundary_Eyeglasses.npy b/boundaries/stylegan2_ffhq/boundary_Eyeglasses.npy
new file mode 100644
index 0000000000000000000000000000000000000000..8a86c12a9f31af628b59ac866a065c138901ad19
Binary files /dev/null and b/boundaries/stylegan2_ffhq/boundary_Eyeglasses.npy differ
diff --git 
a/boundaries/stylegan2_ffhq/boundary_Hat.npy b/boundaries/stylegan2_ffhq/boundary_Hat.npy new file mode 100644 index 0000000000000000000000000000000000000000..4da4d475b6349fca3652a2c8f7fcd6cfed878f0d Binary files /dev/null and b/boundaries/stylegan2_ffhq/boundary_Hat.npy differ diff --git a/boundaries/stylegan2_ffhq/boundary_Mustache.npy b/boundaries/stylegan2_ffhq/boundary_Mustache.npy new file mode 100644 index 0000000000000000000000000000000000000000..78a9e2002e2efc788aac37c7d0ed3f85a2121b81 Binary files /dev/null and b/boundaries/stylegan2_ffhq/boundary_Mustache.npy differ diff --git a/boundaries/stylegan2_ffhq/boundary_Smiling.npy b/boundaries/stylegan2_ffhq/boundary_Smiling.npy new file mode 100644 index 0000000000000000000000000000000000000000..23e71b68bda2891c191ac204cdf5cfc05920ff8d Binary files /dev/null and b/boundaries/stylegan2_ffhq/boundary_Smiling.npy differ diff --git a/boundaries/stylegan2_ffhq/boundary_Young.npy b/boundaries/stylegan2_ffhq/boundary_Young.npy new file mode 100644 index 0000000000000000000000000000000000000000..10d1ee3cc9739a4788bac01551119492b7a752b1 Binary files /dev/null and b/boundaries/stylegan2_ffhq/boundary_Young.npy differ diff --git a/boundaries/stylegan_ffhq/boundary_Bald.npy b/boundaries/stylegan_ffhq/boundary_Bald.npy new file mode 100644 index 0000000000000000000000000000000000000000..327d58a77786fa4631428115363ba76523d2ddf8 Binary files /dev/null and b/boundaries/stylegan_ffhq/boundary_Bald.npy differ diff --git a/boundaries/stylegan_ffhq/boundary_Eyeglasses.npy b/boundaries/stylegan_ffhq/boundary_Eyeglasses.npy new file mode 100644 index 0000000000000000000000000000000000000000..e7613f080a6231a8c13bcd501f67c1cde696d1bc Binary files /dev/null and b/boundaries/stylegan_ffhq/boundary_Eyeglasses.npy differ diff --git a/boundaries/stylegan_ffhq/boundary_Hat.npy b/boundaries/stylegan_ffhq/boundary_Hat.npy new file mode 100644 index 0000000000000000000000000000000000000000..b5232eb1f251db3b89fa1fb112531897f6d0c39a Binary files /dev/null and b/boundaries/stylegan_ffhq/boundary_Hat.npy differ diff --git a/boundaries/stylegan_ffhq/boundary_Mustache.npy b/boundaries/stylegan_ffhq/boundary_Mustache.npy new file mode 100644 index 0000000000000000000000000000000000000000..a9371a7e96efc51a6dc6e35f1469f9f771be4981 Binary files /dev/null and b/boundaries/stylegan_ffhq/boundary_Mustache.npy differ diff --git a/boundaries/stylegan_ffhq/boundary_Smiling.npy b/boundaries/stylegan_ffhq/boundary_Smiling.npy new file mode 100644 index 0000000000000000000000000000000000000000..94273a41274d8bfe3fd93e1a7c9cb0a812aadf70 Binary files /dev/null and b/boundaries/stylegan_ffhq/boundary_Smiling.npy differ diff --git a/boundaries/stylegan_ffhq/boundary_Young.npy b/boundaries/stylegan_ffhq/boundary_Young.npy new file mode 100644 index 0000000000000000000000000000000000000000..08cbcc56f495cbac52fb274e3216ac31c63b9a19 Binary files /dev/null and b/boundaries/stylegan_ffhq/boundary_Young.npy differ diff --git a/dnnlib/__init__.py b/dnnlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7423bffe245d0ff3f32e8658aa67daae454e64e --- /dev/null +++ b/dnnlib/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. 
Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +from .util import EasyDict, make_cache_dir_path diff --git a/dnnlib/__pycache__/__init__.cpython-38.pyc b/dnnlib/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e5a961ee4d812f7ca6582b47ddfea7f343dbc18 Binary files /dev/null and b/dnnlib/__pycache__/__init__.cpython-38.pyc differ diff --git a/dnnlib/__pycache__/util.cpython-38.pyc b/dnnlib/__pycache__/util.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ea50b6cbc1b0233e434d9b8b3d76f554febe753 Binary files /dev/null and b/dnnlib/__pycache__/util.cpython-38.pyc differ diff --git a/dnnlib/util.py b/dnnlib/util.py new file mode 100644 index 0000000000000000000000000000000000000000..6bbdf3bd8fe1c138cd969d37dcc52190b45c4c16 --- /dev/null +++ b/dnnlib/util.py @@ -0,0 +1,491 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Miscellaneous utility classes and functions.""" + +import ctypes +import fnmatch +import importlib +import inspect +import numpy as np +import os +import shutil +import sys +import types +import io +import pickle +import re +import requests +import html +import hashlib +import glob +import tempfile +import urllib +import urllib.request +import uuid + +from distutils.util import strtobool +from typing import Any, List, Tuple, Union + + +# Util classes +# ------------------------------------------------------------------------------------------ + + +class EasyDict(dict): + """Convenience class that behaves like a dict but allows access with the attribute syntax.""" + + def __getattr__(self, name: str) -> Any: + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __setattr__(self, name: str, value: Any) -> None: + self[name] = value + + def __delattr__(self, name: str) -> None: + del self[name] + + +class Logger(object): + """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" + + def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): + self.file = None + + if file_name is not None: + self.file = open(file_name, file_mode) + + self.should_flush = should_flush + self.stdout = sys.stdout + self.stderr = sys.stderr + + sys.stdout = self + sys.stderr = self + + def __enter__(self) -> "Logger": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self.close() + + def write(self, text: Union[str, bytes]) -> None: + """Write text to stdout (and a file) and optionally flush.""" + if isinstance(text, bytes): + text = text.decode() + if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash + return + + if self.file is not None: + self.file.write(text) + + self.stdout.write(text) + + if self.should_flush: + self.flush() + + def flush(self) -> None: + """Flush written text to both stdout and a file, if open.""" + if self.file is not None: + 
self.file.flush() + + self.stdout.flush() + + def close(self) -> None: + """Flush, close possible files, and remove stdout/stderr mirroring.""" + self.flush() + + # if using multiple loggers, prevent closing in wrong order + if sys.stdout is self: + sys.stdout = self.stdout + if sys.stderr is self: + sys.stderr = self.stderr + + if self.file is not None: + self.file.close() + self.file = None + + +# Cache directories +# ------------------------------------------------------------------------------------------ + +_dnnlib_cache_dir = None + +def set_cache_dir(path: str) -> None: + global _dnnlib_cache_dir + _dnnlib_cache_dir = path + +def make_cache_dir_path(*paths: str) -> str: + if _dnnlib_cache_dir is not None: + return os.path.join(_dnnlib_cache_dir, *paths) + if 'DNNLIB_CACHE_DIR' in os.environ: + return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths) + if 'HOME' in os.environ: + return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths) + if 'USERPROFILE' in os.environ: + return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths) + return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths) + +# Small util functions +# ------------------------------------------------------------------------------------------ + + +def format_time(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) + else: + return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) + + +def format_time_brief(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60) + else: + return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24) + + +def ask_yes_no(question: str) -> bool: + """Ask the user the question until the user inputs a valid answer.""" + while True: + try: + print("{0} [y/n]".format(question)) + return strtobool(input().lower()) + except ValueError: + pass + + +def tuple_product(t: Tuple) -> Any: + """Calculate the product of the tuple elements.""" + result = 1 + + for v in t: + result *= v + + return result + + +_str_to_ctype = { + "uint8": ctypes.c_ubyte, + "uint16": ctypes.c_uint16, + "uint32": ctypes.c_uint32, + "uint64": ctypes.c_uint64, + "int8": ctypes.c_byte, + "int16": ctypes.c_int16, + "int32": ctypes.c_int32, + "int64": ctypes.c_int64, + "float32": ctypes.c_float, + "float64": ctypes.c_double +} + + +def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: + """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" + type_str = None + + if isinstance(type_obj, str): + type_str = type_obj + elif hasattr(type_obj, "__name__"): + type_str = type_obj.__name__ + elif hasattr(type_obj, "name"): + type_str = type_obj.name + else: + raise RuntimeError("Cannot infer type name from input") + + assert type_str in _str_to_ctype.keys() + + my_dtype = np.dtype(type_str) + my_ctype = _str_to_ctype[type_str] + + assert 
my_dtype.itemsize == ctypes.sizeof(my_ctype) + + return my_dtype, my_ctype + + +def is_pickleable(obj: Any) -> bool: + try: + with io.BytesIO() as stream: + pickle.dump(obj, stream) + return True + except: + return False + + +# Functionality to import modules/objects by name, and call functions by name +# ------------------------------------------------------------------------------------------ + +def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: + """Searches for the underlying module behind the name to some python object. + Returns the module and the object name (original name with module part removed).""" + + # allow convenience shorthands, substitute them by full names + obj_name = re.sub("^np.", "numpy.", obj_name) + obj_name = re.sub("^tf.", "tensorflow.", obj_name) + + # list alternatives for (module_name, local_obj_name) + parts = obj_name.split(".") + name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] + + # try each alternative in turn + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + return module, local_obj_name + except: + pass + + # maybe some of the modules themselves contain errors? + for module_name, _local_obj_name in name_pairs: + try: + importlib.import_module(module_name) # may raise ImportError + except ImportError: + if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): + raise + + # maybe the requested attribute is missing? + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + except ImportError: + pass + + # we are out of luck, but we have no idea why + raise ImportError(obj_name) + + +def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: + """Traverses the object name and returns the last (rightmost) python object.""" + if obj_name == '': + return module + obj = module + for part in obj_name.split("."): + obj = getattr(obj, part) + return obj + + +def get_obj_by_name(name: str) -> Any: + """Finds the python object with the given name.""" + module, obj_name = get_module_from_obj_name(name) + return get_obj_from_module(module, obj_name) + + +def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: + """Finds the python object with the given name and calls it as a function.""" + assert func_name is not None + func_obj = get_obj_by_name(func_name) + assert callable(func_obj) + return func_obj(*args, **kwargs) + + +def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any: + """Finds the python class with the given name and constructs it with the given arguments.""" + return call_func_by_name(*args, func_name=class_name, **kwargs) + + +def get_module_dir_by_obj_name(obj_name: str) -> str: + """Get the directory path of the module containing the given object name.""" + module, _ = get_module_from_obj_name(obj_name) + return os.path.dirname(inspect.getfile(module)) + + +def is_top_level_function(obj: Any) -> bool: + """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" + return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ + + +def get_top_level_function_name(obj: Any) -> str: + """Return the fully-qualified name of a top-level function.""" + assert 
is_top_level_function(obj) + module = obj.__module__ + if module == '__main__': + module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0] + return module + "." + obj.__name__ + + +# File system helpers +# ------------------------------------------------------------------------------------------ + +def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: + """List all files recursively in a given directory while ignoring given file and directory names. + Returns list of tuples containing both absolute and relative paths.""" + assert os.path.isdir(dir_path) + base_name = os.path.basename(os.path.normpath(dir_path)) + + if ignores is None: + ignores = [] + + result = [] + + for root, dirs, files in os.walk(dir_path, topdown=True): + for ignore_ in ignores: + dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] + + # dirs need to be edited in-place + for d in dirs_to_remove: + dirs.remove(d) + + files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] + + absolute_paths = [os.path.join(root, f) for f in files] + relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] + + if add_base_to_relative: + relative_paths = [os.path.join(base_name, p) for p in relative_paths] + + assert len(absolute_paths) == len(relative_paths) + result += zip(absolute_paths, relative_paths) + + return result + + +def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: + """Takes in a list of tuples of (src, dst) paths and copies files. + Will create all necessary directories.""" + for file in files: + target_dir_name = os.path.dirname(file[1]) + + # will create all intermediate-level directories + if not os.path.exists(target_dir_name): + os.makedirs(target_dir_name) + + shutil.copyfile(file[0], file[1]) + + +# URL helpers +# ------------------------------------------------------------------------------------------ + +def is_url(obj: Any, allow_file_urls: bool = False) -> bool: + """Determine whether the given object is a valid URL string.""" + if not isinstance(obj, str) or not "://" in obj: + return False + if allow_file_urls and obj.startswith('file://'): + return True + try: + res = requests.compat.urlparse(obj) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + except: + return False + return True + + +def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any: + """Download the given URL and return a binary-mode file object to access the data.""" + assert num_attempts >= 1 + assert not (return_filename and (not cache)) + + # Doesn't look like an URL scheme so interpret it as a local filename. + if not re.match('^[a-z]+://', url): + return url if return_filename else open(url, "rb") + + # Handle file URLs. This code handles unusual file:// patterns that + # arise on Windows: + # + # file:///c:/foo.txt + # + # which would translate to a local '/c:/foo.txt' filename that's + # invalid. Drop the forward slash for such pathnames. + # + # If you touch this code path, you should test it on both Linux and + # Windows. + # + # Some internet resources suggest using urllib.request.url2pathname() but + # but that converts forward slashes to backslashes and this causes + # its own set of problems. 
+ if url.startswith('file://'): + filename = urllib.parse.urlparse(url).path + if re.match(r'^/[a-zA-Z]:', filename): + filename = filename[1:] + return filename if return_filename else open(filename, "rb") + + assert is_url(url) + + # Lookup from cache. + if cache_dir is None: + cache_dir = make_cache_dir_path('downloads') + + url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() + if cache: + cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) + if len(cache_files) == 1: + filename = cache_files[0] + return filename if return_filename else open(filename, "rb") + + # Download. + url_name = None + url_data = None + with requests.Session() as session: + if verbose: + print("Downloading %s ..." % url, end="", flush=True) + for attempts_left in reversed(range(num_attempts)): + try: + with session.get(url) as res: + res.raise_for_status() + if len(res.content) == 0: + raise IOError("No data received") + + if len(res.content) < 8192: + content_str = res.content.decode("utf-8") + if "download_warning" in res.headers.get("Set-Cookie", ""): + links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] + if len(links) == 1: + url = requests.compat.urljoin(url, links[0]) + raise IOError("Google Drive virus checker nag") + if "Google Drive - Quota exceeded" in content_str: + raise IOError("Google Drive download quota exceeded -- please try again later") + + match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) + url_name = match[1] if match else url + url_data = res.content + if verbose: + print(" done") + break + except KeyboardInterrupt: + raise + except: + if not attempts_left: + if verbose: + print(" failed") + raise + if verbose: + print(".", end="", flush=True) + + # Save to cache. + if cache: + safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) + cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) + temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) + os.makedirs(cache_dir, exist_ok=True) + with open(temp_file, "wb") as f: + f.write(url_data) + os.replace(temp_file, cache_file) # atomic + if return_filename: + return cache_file + + # Return data as file object. 
+ assert not return_filename + return io.BytesIO(url_data) diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/__pycache__/__init__.cpython-38.pyc b/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a5f54b0200d77ef3c79f1038f6bfa082c711f33 Binary files /dev/null and b/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/models/__pycache__/base_generator.cpython-38.pyc b/models/__pycache__/base_generator.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd14486467e595de8b64468b28eb444628868882 Binary files /dev/null and b/models/__pycache__/base_generator.cpython-38.pyc differ diff --git a/models/__pycache__/model_settings.cpython-38.pyc b/models/__pycache__/model_settings.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2384fff52a36b94452f0bea04c9df44d3f42a016 Binary files /dev/null and b/models/__pycache__/model_settings.cpython-38.pyc differ diff --git a/models/__pycache__/pggan_generator.cpython-38.pyc b/models/__pycache__/pggan_generator.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78f218262758c190af994257bca0f571731e775c Binary files /dev/null and b/models/__pycache__/pggan_generator.cpython-38.pyc differ diff --git a/models/__pycache__/pggan_generator_model.cpython-38.pyc b/models/__pycache__/pggan_generator_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..396351423645d29bedc5121dcb78ccc580d70973 Binary files /dev/null and b/models/__pycache__/pggan_generator_model.cpython-38.pyc differ diff --git a/models/__pycache__/stylegan2_generator.cpython-38.pyc b/models/__pycache__/stylegan2_generator.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51db71399362ed8a0ef0f97d070b675321d1ee88 Binary files /dev/null and b/models/__pycache__/stylegan2_generator.cpython-38.pyc differ diff --git a/models/__pycache__/stylegan3_generator.cpython-38.pyc b/models/__pycache__/stylegan3_generator.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14a65ed76c6cf2b0a5247a8446aa906b8a8be6c3 Binary files /dev/null and b/models/__pycache__/stylegan3_generator.cpython-38.pyc differ diff --git a/models/__pycache__/stylegan3_official_network.cpython-38.pyc b/models/__pycache__/stylegan3_official_network.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca03af116217e79f8034c865cf14fa40719635f7 Binary files /dev/null and b/models/__pycache__/stylegan3_official_network.cpython-38.pyc differ diff --git a/models/__pycache__/stylegan_generator.cpython-38.pyc b/models/__pycache__/stylegan_generator.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3db82cd7fe318e79674ed0f5fc075a9e3c59be85 Binary files /dev/null and b/models/__pycache__/stylegan_generator.cpython-38.pyc differ diff --git a/models/__pycache__/stylegan_generator_model.cpython-38.pyc b/models/__pycache__/stylegan_generator_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61af3fee85cc4b266735a4d984fed8aa8caba329 Binary files /dev/null and b/models/__pycache__/stylegan_generator_model.cpython-38.pyc differ diff --git a/models/base_generator.py b/models/base_generator.py new file mode 100644 index 
0000000000000000000000000000000000000000..b202d3d871cf84d12ca7291f6b249bcfb3f095ad --- /dev/null +++ b/models/base_generator.py @@ -0,0 +1,248 @@ +# python3.7 +"""Contains the base class for generator.""" + +import os +import sys +import logging +import numpy as np + +import torch + +from . import model_settings + +__all__ = ['BaseGenerator'] + + +def get_temp_logger(logger_name='logger'): + """Gets a temporary logger. + + This logger will print all levels of messages onto the screen. + + Args: + logger_name: Name of the logger. + + Returns: + A `logging.Logger`. + + Raises: + ValueError: If the input `logger_name` is empty. + """ + if not logger_name: + raise ValueError(f'Input `logger_name` should not be empty!') + + logger = logging.getLogger(logger_name) + if not logger.hasHandlers(): + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s") + sh = logging.StreamHandler(stream=sys.stdout) + sh.setLevel(logging.DEBUG) + sh.setFormatter(formatter) + logger.addHandler(sh) + + return logger + + +class BaseGenerator(object): + """Base class for generator used in GAN variants. + + NOTE: The model should be defined with pytorch, and only used for inference. + """ + + def __init__(self, model_name, logger=None): + """Initializes with specific settings. + + The model should be registered in `model_settings.py` with proper settings + first. Among them, some attributes are necessary, including: + (1) gan_type: Type of the GAN model. + (2) latent_space_dim: Dimension of the latent space. Should be a tuple. + (3) resolution: Resolution of the synthesis. + (4) min_val: Minimum value of the raw output. (default -1.0) + (5) max_val: Maximum value of the raw output. (default 1.0) + (6) channel_order: Channel order of the output image. (default: `RGB`) + + Args: + model_name: Name with which the model is registered. + logger: Logger for recording log messages. If set as `None`, a default + logger, which prints messages from all levels to screen, will be + created. (default: None) + + Raises: + AttributeError: If some necessary attributes are missing. + """ + self.model_name = model_name + for key, val in model_settings.MODEL_POOL[model_name].items(): + setattr(self, key, val) + self.use_cuda = model_settings.USE_CUDA + self.batch_size = model_settings.MAX_IMAGES_ON_DEVICE + self.logger = logger or get_temp_logger(model_name + '_generator') + self.model = None + self.run_device = 'cuda' if self.use_cuda else 'cpu' + self.cpu_device = 'cpu' + + # Check necessary settings. + self.check_attr('gan_type') + self.check_attr('latent_space_dim') + self.check_attr('resolution') + self.min_val = getattr(self, 'min_val', -1.0) + self.max_val = getattr(self, 'max_val', 1.0) + self.output_channels = getattr(self, 'output_channels', 3) + self.channel_order = getattr(self, 'channel_order', 'RGB').upper() + assert self.channel_order in ['RGB', 'BGR'] + + # Build model and load pre-trained weights. + self.build() + if os.path.isfile(getattr(self, 'model_path', '')): + self.load() + elif os.path.isfile(getattr(self, 'tf_model_path', '')): + self.convert_tf_model() + else: + self.logger.warning(f'No pre-trained model will be loaded!') + + # Change to inference mode and GPU mode if needed. + assert self.model + self.model.eval().to(self.run_device) + + def check_attr(self, attr_name): + """Checks the existence of a particular attribute. + + Args: + attr_name: Name of the attribute to check. + + Raises: + AttributeError: If the target attribute is missing. 
+ """ + if not hasattr(self, attr_name): + raise AttributeError( + f'`{attr_name}` is missing for model `{self.model_name}`!') + + def build(self): + """Builds the graph.""" + raise NotImplementedError(f'Should be implemented in derived class!') + + def load(self): + """Loads pre-trained weights.""" + raise NotImplementedError(f'Should be implemented in derived class!') + + def convert_tf_model(self, test_num=10): + """Converts models weights from tensorflow version. + + Args: + test_num: Number of images to generate for testing whether the conversion + is done correctly. `0` means skipping the test. (default 10) + """ + raise NotImplementedError(f'Should be implemented in derived class!') + + def sample(self, num): + """Samples latent codes randomly. + + Args: + num: Number of latent codes to sample. Should be positive. + + Returns: + A `numpy.ndarray` as sampled latend codes. + """ + raise NotImplementedError(f'Should be implemented in derived class!') + + def preprocess(self, latent_codes): + """Preprocesses the input latent code if needed. + + Args: + latent_codes: The input latent codes for preprocessing. + + Returns: + The preprocessed latent codes which can be used as final input for the + generator. + """ + raise NotImplementedError(f'Should be implemented in derived class!') + + def easy_sample(self, num): + """Wraps functions `sample()` and `preprocess()` together.""" + return self.preprocess(self.sample(num)) + + def synthesize(self, latent_codes): + """Synthesizes images with given latent codes. + + NOTE: The latent codes should have already been preprocessed. + + Args: + latent_codes: Input latent codes for image synthesis. + + Returns: + A dictionary whose values are raw outputs from the generator. + """ + raise NotImplementedError(f'Should be implemented in derived class!') + + def get_value(self, tensor): + """Gets value of a `torch.Tensor`. + + Args: + tensor: The input tensor to get value from. + + Returns: + A `numpy.ndarray`. + + Raises: + ValueError: If the tensor is with neither `torch.Tensor` type or + `numpy.ndarray` type. + """ + if isinstance(tensor, np.ndarray): + return tensor + if isinstance(tensor, torch.Tensor): + return tensor.to(self.cpu_device).detach().numpy() + raise ValueError(f'Unsupported input type `{type(tensor)}`!') + + def postprocess(self, images): + """Postprocesses the output images if needed. + + This function assumes the input numpy array is with shape [batch_size, + channel, height, width]. Here, `channel = 3` for color image and + `channel = 1` for grayscale image. The return images are with shape + [batch_size, height, width, channel]. NOTE: The channel order of output + image will always be `RGB`. + + Args: + images: The raw output from the generator. + + Returns: + The postprocessed images with dtype `numpy.uint8` with range [0, 255]. + + Raises: + ValueError: If the input `images` are not with type `numpy.ndarray` or not + with shape [batch_size, channel, height, width]. + """ + if not isinstance(images, np.ndarray): + raise ValueError(f'Images should be with type `numpy.ndarray`!') + if ('stylegan3' not in self.model_name) and ('stylegan2' not in self.model_name): + images_shape = images.shape + if len(images_shape) != 4 or images_shape[1] not in [1, 3]: + raise ValueError(f'Input should be with shape [batch_size, channel, ' + f'height, width], where channel equals to 1 or 3. 
' + f'But {images_shape} is received!') + images = (images - self.min_val) * 255 / (self.max_val - self.min_val) + images = np.clip(images + 0.5, 0, 255).astype(np.uint8) + images = images.transpose(0, 2, 3, 1) + if self.channel_order == 'BGR': + images = images[:, :, :, ::-1] + + return images + + def easy_synthesize(self, latent_codes, **kwargs): + """Wraps functions `synthesize()` and `postprocess()` together.""" + outputs = self.synthesize(latent_codes, **kwargs) + if 'image' in outputs: + outputs['image'] = self.postprocess(outputs['image']) + + return outputs + + def get_batch_inputs(self, latent_codes): + """Gets batch inputs from a collection of latent codes. + + This function will yield at most `self.batch_size` latent_codes at a time. + + Args: + latent_codes: The input latent codes for generation. First dimension + should be the total number. + """ + total_num = latent_codes.shape[0] + for i in range(0, total_num, self.batch_size): + yield latent_codes[i:i + self.batch_size] diff --git a/models/model_settings.py b/models/model_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..65554100918908e5735e8a757439fb77ecc38e0e --- /dev/null +++ b/models/model_settings.py @@ -0,0 +1,102 @@ +# python3.7 +"""Contains basic configurations for models used in this project. + +Please download the public released models from the following two repositories +OR train your own models, and then put them into `pretrain` folder. + +ProgressiveGAN: https://github.com/tkarras/progressive_growing_of_gans +StyleGAN: https://github.com/NVlabs/stylegan +StyleGAN: + +NOTE: Any new model should be registered in `MODEL_POOL` before using. +""" + +import os.path + +BASE_DIR = os.path.dirname(os.path.relpath(__file__)) + +MODEL_DIR = BASE_DIR + '/pretrain' + +MODEL_POOL = { + 'pggan_celebahq': { + 'tf_model_path': MODEL_DIR + '/karras2018iclr-celebahq-1024x1024.pkl', + 'model_path': MODEL_DIR + '/pggan_celebahq.pth', + 'gan_type': 'pggan', + 'dataset_name': 'celebahq', + 'latent_space_dim': 512, + 'resolution': 1024, + 'min_val': -1.0, + 'max_val': 1.0, + 'output_channels': 3, + 'channel_order': 'RGB', + 'fused_scale': False, + }, + 'stylegan_celebahq': { + 'tf_model_path': + MODEL_DIR + '/karras2019stylegan-celebahq-1024x1024.pkl', + 'model_path': MODEL_DIR + '/stylegan_celebahq.pth', + 'gan_type': 'stylegan', + 'dataset_name': 'celebahq', + 'latent_space_dim': 512, + 'w_space_dim': 512, + 'resolution': 1024, + 'min_val': -1.0, + 'max_val': 1.0, + 'output_channels': 3, + 'channel_order': 'RGB', + 'fused_scale': 'auto', + }, + 'stylegan_ffhq': { + 'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl', + 'model_path': MODEL_DIR + '/stylegan_ffhq.pth', + 'gan_type': 'stylegan', + 'dataset_name': 'ffhq', + 'latent_space_dim': 512, + 'w_space_dim': 512, + 'resolution': 1024, + 'min_val': -1.0, + 'max_val': 1.0, + 'output_channels': 3, + 'channel_order': 'RGB', + 'fused_scale': 'auto', + }, + 'stylegan2_ffhq': { + 'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl', + 'model_path': MODEL_DIR + '/stylegan2-ffhq-1024x1024.pkl', + 'gan_type': 'stylegan2', + 'dataset_name': 'ffhq', + 'latent_space_dim': 512, + 'w_space_dim': 512, + 'c_space_dim': 512, + 'resolution': 1024, + 'min_val': -1.0, + 'max_val': 1.0, + 'output_channels': 3, + 'channel_order': 'RGB', + 'fused_scale': 'auto', + }, + 'stylegan3_ffhq': { + 'model_path': MODEL_DIR + '/stylegan3-t-ffhq-1024x1024.pkl', + 'gan_type': 'stylegan3', + 'dataset_name': 'ffhq', + 'latent_space_dim': 512, + 
'w_space_dim': 512, + 'c_space_dim': 512, + 'resolution': 1024, + 'min_val': -1.0, + 'max_val': 1.0, + 'output_channels': 3, + 'channel_order': 'RGB', + 'fused_scale': 'auto', + }, +} + +# Settings for StyleGAN. +STYLEGAN_TRUNCATION_PSI = 0.7 # 1.0 means no truncation +STYLEGAN_TRUNCATION_LAYERS = 8 # 0 means no truncation +STYLEGAN_RANDOMIZE_NOISE = False + +# Settings for model running. +USE_CUDA = False + +MAX_IMAGES_ON_DEVICE = 8 diff --git a/models/pggan_generator.py b/models/pggan_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..5a9360fd28a55aa5e7a1e7ce4a3d0ff262dc148f --- /dev/null +++ b/models/pggan_generator.py @@ -0,0 +1,133 @@ +# python3.7 +"""Contains the generator class of ProgressiveGAN. + +Basically, this class is derived from the `BaseGenerator` class defined in +`base_generator.py`. +""" + +import os +import numpy as np + +import torch + +from . import model_settings +from .pggan_generator_model import PGGANGeneratorModel +from .base_generator import BaseGenerator + +__all__ = ['PGGANGenerator'] + + +class PGGANGenerator(BaseGenerator): + """Defines the generator class of ProgressiveGAN.""" + + def __init__(self, model_name, logger=None): + super().__init__(model_name, logger) + assert self.gan_type == 'pggan' + + def build(self): + self.check_attr('fused_scale') + self.model = PGGANGeneratorModel(resolution=self.resolution, + fused_scale=self.fused_scale, + output_channels=self.output_channels) + + def load(self): + self.logger.info(f'Loading pytorch model from `{self.model_path}`.') + self.model.load_state_dict(torch.load(self.model_path)) + self.logger.info(f'Successfully loaded!') + self.lod = self.model.lod.to(self.cpu_device).tolist() + self.logger.info(f' `lod` of the loaded model is {self.lod}.') + + def convert_tf_model(self, test_num=10): + import sys + import pickle + import tensorflow as tf + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + sys.path.append(model_settings.BASE_DIR + '/pggan_tf_official') + + self.logger.info(f'Loading tensorflow model from `{self.tf_model_path}`.') + tf.InteractiveSession() + with open(self.tf_model_path, 'rb') as f: + _, _, tf_model = pickle.load(f) + self.logger.info(f'Successfully loaded!') + + self.logger.info(f'Converting tensorflow model to pytorch version.') + tf_vars = dict(tf_model.__getstate__()['variables']) + state_dict = self.model.state_dict() + for pth_var_name, tf_var_name in self.model.pth_to_tf_var_mapping.items(): + if 'ToRGB_lod' in tf_var_name: + lod = int(tf_var_name[len('ToRGB_lod')]) + lod_shift = 10 - int(np.log2(self.resolution)) + tf_var_name = tf_var_name.replace(f'{lod}', f'{lod - lod_shift}') + if tf_var_name not in tf_vars: + self.logger.debug(f'Variable `{tf_var_name}` does not exist in ' + f'tensorflow model.') + continue + self.logger.debug(f' Converting `{tf_var_name}` to `{pth_var_name}`.') + var = torch.from_numpy(np.array(tf_vars[tf_var_name])) + if 'weight' in pth_var_name: + if 'layer0.conv' in pth_var_name: + var = var.view(var.shape[0], -1, 4, 4).permute(1, 0, 2, 3).flip(2, 3) + elif 'Conv0_up' in tf_var_name: + var = var.permute(0, 1, 3, 2) + else: + var = var.permute(3, 2, 0, 1) + state_dict[pth_var_name] = var + self.logger.info(f'Successfully converted!') + + self.logger.info(f'Saving pytorch model to `{self.model_path}`.') + torch.save(state_dict, self.model_path) + self.logger.info(f'Successfully saved!') + + self.load() + + # Official tensorflow model can only run on GPU. 
+ if test_num <= 0 or not tf.test.is_built_with_cuda(): + return + self.logger.info(f'Testing conversion results.') + self.model.eval().to(self.run_device) + label_dim = tf_model.input_shapes[1][1] + tf_fake_label = np.zeros((1, label_dim), np.float32) + total_distance = 0.0 + for i in range(test_num): + latent_code = self.easy_sample(1) + tf_output = tf_model.run(latent_code, tf_fake_label) + pth_output = self.synthesize(latent_code)['image'] + distance = np.average(np.abs(tf_output - pth_output)) + self.logger.debug(f' Test {i:03d}: distance {distance:.6e}.') + total_distance += distance + self.logger.info(f'Average distance is {total_distance / test_num:.6e}.') + + def sample(self, num): + assert num > 0 + return np.random.randn(num, self.latent_space_dim).astype(np.float32) + + def preprocess(self, latent_codes): + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + + latent_codes = latent_codes.reshape(-1, self.latent_space_dim) + norm = np.linalg.norm(latent_codes, axis=1, keepdims=True) + latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim) + return latent_codes.astype(np.float32) + + def synthesize(self, latent_codes): + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + latent_codes_shape = latent_codes.shape + if not (len(latent_codes_shape) == 2 and + latent_codes_shape[0] <= self.batch_size and + latent_codes_shape[1] == self.latent_space_dim): + raise ValueError(f'Latent_codes should be with shape [batch_size, ' + f'latent_space_dim], where `batch_size` no larger than ' + f'{self.batch_size}, and `latent_space_dim` equal to ' + f'{self.latent_space_dim}!\n' + f'But {latent_codes_shape} received!') + + zs = torch.from_numpy(latent_codes).type(torch.FloatTensor) + zs = zs.to(self.run_device) + images = self.model(zs) + results = { + 'z': latent_codes, + 'image': self.get_value(images), + } + return results diff --git a/models/pggan_generator_model.py b/models/pggan_generator_model.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb97b503bed3bb4c485668a383398708ad2aae4 --- /dev/null +++ b/models/pggan_generator_model.py @@ -0,0 +1,322 @@ +# python3.7 +"""Contains the implementation of generator described in ProgressiveGAN. + +Different from the official tensorflow model in folder `pggan_tf_official`, this +is a simple pytorch version which only contains the generator part. This class +is specially used for inference. + +For more details, please check the original paper: +https://arxiv.org/pdf/1710.10196.pdf +""" + +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['PGGANGeneratorModel'] + +# Defines a dictionary, which maps the target resolution of the final generated +# image to numbers of filters used in each convolutional layer in sequence. +_RESOLUTIONS_TO_CHANNELS = { + 8: [512, 512, 512], + 16: [512, 512, 512, 512], + 32: [512, 512, 512, 512, 512], + 64: [512, 512, 512, 512, 512, 256], + 128: [512, 512, 512, 512, 512, 256, 128], + 256: [512, 512, 512, 512, 512, 256, 128, 64], + 512: [512, 512, 512, 512, 512, 256, 128, 64, 32], + 1024: [512, 512, 512, 512, 512, 256, 128, 64, 32, 16], +} + +# Variable mapping from pytorch model to official tensorflow model. 
+_PGGAN_PTH_VARS_TO_TF_VARS = { + 'lod': 'lod', # [] + 'layer0.conv.weight': '4x4/Dense/weight', # [512, 512, 4, 4] + 'layer0.wscale.bias': '4x4/Dense/bias', # [512] + 'layer1.conv.weight': '4x4/Conv/weight', # [512, 512, 3, 3] + 'layer1.wscale.bias': '4x4/Conv/bias', # [512] + 'layer2.conv.weight': '8x8/Conv0/weight', # [512, 512, 3, 3] + 'layer2.wscale.bias': '8x8/Conv0/bias', # [512] + 'layer3.conv.weight': '8x8/Conv1/weight', # [512, 512, 3, 3] + 'layer3.wscale.bias': '8x8/Conv1/bias', # [512] + 'layer4.conv.weight': '16x16/Conv0/weight', # [512, 512, 3, 3] + 'layer4.wscale.bias': '16x16/Conv0/bias', # [512] + 'layer5.conv.weight': '16x16/Conv1/weight', # [512, 512, 3, 3] + 'layer5.wscale.bias': '16x16/Conv1/bias', # [512] + 'layer6.conv.weight': '32x32/Conv0/weight', # [512, 512, 3, 3] + 'layer6.wscale.bias': '32x32/Conv0/bias', # [512] + 'layer7.conv.weight': '32x32/Conv1/weight', # [512, 512, 3, 3] + 'layer7.wscale.bias': '32x32/Conv1/bias', # [512] + 'layer8.conv.weight': '64x64/Conv0/weight', # [256, 512, 3, 3] + 'layer8.wscale.bias': '64x64/Conv0/bias', # [256] + 'layer9.conv.weight': '64x64/Conv1/weight', # [256, 256, 3, 3] + 'layer9.wscale.bias': '64x64/Conv1/bias', # [256] + 'layer10.conv.weight': '128x128/Conv0/weight', # [128, 256, 3, 3] + 'layer10.wscale.bias': '128x128/Conv0/bias', # [128] + 'layer11.conv.weight': '128x128/Conv1/weight', # [128, 128, 3, 3] + 'layer11.wscale.bias': '128x128/Conv1/bias', # [128] + 'layer12.conv.weight': '256x256/Conv0/weight', # [64, 128, 3, 3] + 'layer12.wscale.bias': '256x256/Conv0/bias', # [64] + 'layer13.conv.weight': '256x256/Conv1/weight', # [64, 64, 3, 3] + 'layer13.wscale.bias': '256x256/Conv1/bias', # [64] + 'layer14.conv.weight': '512x512/Conv0/weight', # [32, 64, 3, 3] + 'layer14.wscale.bias': '512x512/Conv0/bias', # [32] + 'layer15.conv.weight': '512x512/Conv1/weight', # [32, 32, 3, 3] + 'layer15.wscale.bias': '512x512/Conv1/bias', # [32] + 'layer16.conv.weight': '1024x1024/Conv0/weight', # [16, 32, 3, 3] + 'layer16.wscale.bias': '1024x1024/Conv0/bias', # [16] + 'layer17.conv.weight': '1024x1024/Conv1/weight', # [16, 16, 3, 3] + 'layer17.wscale.bias': '1024x1024/Conv1/bias', # [16] + 'output0.conv.weight': 'ToRGB_lod8/weight', # [3, 512, 1, 1] + 'output0.wscale.bias': 'ToRGB_lod8/bias', # [3] + 'output1.conv.weight': 'ToRGB_lod7/weight', # [3, 512, 1, 1] + 'output1.wscale.bias': 'ToRGB_lod7/bias', # [3] + 'output2.conv.weight': 'ToRGB_lod6/weight', # [3, 512, 1, 1] + 'output2.wscale.bias': 'ToRGB_lod6/bias', # [3] + 'output3.conv.weight': 'ToRGB_lod5/weight', # [3, 512, 1, 1] + 'output3.wscale.bias': 'ToRGB_lod5/bias', # [3] + 'output4.conv.weight': 'ToRGB_lod4/weight', # [3, 256, 1, 1] + 'output4.wscale.bias': 'ToRGB_lod4/bias', # [3] + 'output5.conv.weight': 'ToRGB_lod3/weight', # [3, 128, 1, 1] + 'output5.wscale.bias': 'ToRGB_lod3/bias', # [3] + 'output6.conv.weight': 'ToRGB_lod2/weight', # [3, 64, 1, 1] + 'output6.wscale.bias': 'ToRGB_lod2/bias', # [3] + 'output7.conv.weight': 'ToRGB_lod1/weight', # [3, 32, 1, 1] + 'output7.wscale.bias': 'ToRGB_lod1/bias', # [3] + 'output8.conv.weight': 'ToRGB_lod0/weight', # [3, 16, 1, 1] + 'output8.wscale.bias': 'ToRGB_lod0/bias', # [3] +} + + +class PGGANGeneratorModel(nn.Module): + """Defines the generator module in ProgressiveGAN. + + Note that the generated images are with RGB color channels with range [-1, 1]. + """ + + def __init__(self, + resolution=1024, + fused_scale=False, + output_channels=3): + """Initializes the generator with basic settings. 
+ + Args: + resolution: The resolution of the final output image. (default: 1024) + fused_scale: Whether to fused `upsample` and `conv2d` together, resulting + in `conv2_transpose`. (default: False) + output_channels: Number of channels of the output image. (default: 3) + + Raises: + ValueError: If the input `resolution` is not supported. + """ + super().__init__() + + try: + self.channels = _RESOLUTIONS_TO_CHANNELS[resolution] + except KeyError: + raise ValueError(f'Invalid resolution: {resolution}!\n' + f'Resolutions allowed: ' + f'{list(_RESOLUTIONS_TO_CHANNELS)}.') + assert len(self.channels) == int(np.log2(resolution)) + + self.resolution = resolution + self.fused_scale = fused_scale + self.output_channels = output_channels + + for block_idx in range(1, len(self.channels)): + if block_idx == 1: + self.add_module( + f'layer{2 * block_idx - 2}', + ConvBlock(in_channels=self.channels[block_idx - 1], + out_channels=self.channels[block_idx], + kernel_size=4, + padding=3)) + else: + self.add_module( + f'layer{2 * block_idx - 2}', + ConvBlock(in_channels=self.channels[block_idx - 1], + out_channels=self.channels[block_idx], + upsample=True, + fused_scale=self.fused_scale)) + self.add_module( + f'layer{2 * block_idx - 1}', + ConvBlock(in_channels=self.channels[block_idx], + out_channels=self.channels[block_idx])) + self.add_module( + f'output{block_idx - 1}', + ConvBlock(in_channels=self.channels[block_idx], + out_channels=self.output_channels, + kernel_size=1, + padding=0, + wscale_gain=1.0, + activation_type='linear')) + + self.upsample = ResolutionScalingLayer() + self.lod = nn.Parameter(torch.zeros(())) + + self.pth_to_tf_var_mapping = {} + for pth_var_name, tf_var_name in _PGGAN_PTH_VARS_TO_TF_VARS.items(): + if self.fused_scale and 'Conv0' in tf_var_name: + pth_var_name = pth_var_name.replace('conv.weight', 'weight') + tf_var_name = tf_var_name.replace('Conv0', 'Conv0_up') + self.pth_to_tf_var_mapping[pth_var_name] = tf_var_name + + def forward(self, x): + if len(x.shape) != 2: + raise ValueError(f'The input tensor should be with shape [batch_size, ' + f'noise_dim], but {x.shape} received!') + x = x.view(x.shape[0], x.shape[1], 1, 1) + + lod = self.lod.cpu().tolist() + for block_idx in range(1, len(self.channels)): + if block_idx + lod < len(self.channels): + x = self.__getattr__(f'layer{2 * block_idx - 2}')(x) + x = self.__getattr__(f'layer{2 * block_idx - 1}')(x) + image = self.__getattr__(f'output{block_idx - 1}')(x) + else: + image = self.upsample(image) + return image + + +class PixelNormLayer(nn.Module): + """Implements pixel-wise feature vector normalization layer.""" + + def __init__(self, epsilon=1e-8): + super().__init__() + self.epsilon = epsilon + + def forward(self, x): + return x / torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + self.epsilon) + + +class ResolutionScalingLayer(nn.Module): + """Implements the resolution scaling layer. + + Basically, this layer can be used to upsample or downsample feature maps from + spatial domain with nearest neighbor interpolation. + """ + + def __init__(self, scale_factor=2): + super().__init__() + self.scale_factor = scale_factor + + def forward(self, x): + return F.interpolate(x, scale_factor=self.scale_factor, mode='nearest') + + +class WScaleLayer(nn.Module): + """Implements the layer to scale weight variable and add bias. + + Note that, the weight variable is trained in `nn.Conv2d` layer, and only + scaled with a constant number, which is not trainable, in this layer. However, + the bias variable is trainable in this layer. 
+ """ + + def __init__(self, in_channels, out_channels, kernel_size, gain=np.sqrt(2.0)): + super().__init__() + fan_in = in_channels * kernel_size * kernel_size + self.scale = gain / np.sqrt(fan_in) + self.bias = nn.Parameter(torch.zeros(out_channels)) + + def forward(self, x): + return x * self.scale + self.bias.view(1, -1, 1, 1) + + +class ConvBlock(nn.Module): + """Implements the convolutional block used in ProgressiveGAN. + + Basically, this block executes pixel-wise normalization layer, upsampling + layer (if needed), convolutional layer, weight-scale layer, and activation + layer in sequence. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + add_bias=False, + upsample=False, + fused_scale=False, + wscale_gain=np.sqrt(2.0), + activation_type='lrelu'): + """Initializes the class with block settings. + + Args: + in_channels: Number of channels of the input tensor fed into this block. + out_channels: Number of channels (kernels) of the output tensor. + kernel_size: Size of the convolutional kernel. + stride: Stride parameter for convolution operation. + padding: Padding parameter for convolution operation. + dilation: Dilation rate for convolution operation. + add_bias: Whether to add bias onto the convolutional result. + upsample: Whether to upsample the input tensor before convolution. + fused_scale: Whether to fused `upsample` and `conv2d` together, resulting + in `conv2_transpose`. + wscale_gain: The gain factor for `wscale` layer. + wscale_lr_multiplier: The learning rate multiplier factor for `wscale` + layer. + activation_type: Type of activation function. Support `linear`, `lrelu` + and `tanh`. + + Raises: + NotImplementedError: If the input `activation_type` is not supported. 
+ """ + super().__init__() + self.pixel_norm = PixelNormLayer() + + if upsample and not fused_scale: + self.upsample = ResolutionScalingLayer() + else: + self.upsample = nn.Identity() + + if upsample and fused_scale: + self.weight = nn.Parameter( + torch.randn(kernel_size, kernel_size, in_channels, out_channels)) + fan_in = in_channels * kernel_size * kernel_size + self.scale = wscale_gain / np.sqrt(fan_in) + else: + self.conv = nn.Conv2d(in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=1, + bias=add_bias) + + self.wscale = WScaleLayer(in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + gain=wscale_gain) + + if activation_type == 'linear': + self.activate = nn.Identity() + elif activation_type == 'lrelu': + self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) + elif activation_type == 'tanh': + self.activate = nn.Hardtanh() + else: + raise NotImplementedError(f'Not implemented activation function: ' + f'{activation_type}!') + + def forward(self, x): + x = self.pixel_norm(x) + x = self.upsample(x) + if hasattr(self, 'conv'): + x = self.conv(x) + else: + kernel = self.weight * self.scale + kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0) + kernel = (kernel[1:, 1:] + kernel[:-1, 1:] + + kernel[1:, :-1] + kernel[:-1, :-1]) + kernel = kernel.permute(2, 3, 0, 1) + x = F.conv_transpose2d(x, kernel, stride=2, padding=1) + x = x / self.scale + x = self.wscale(x) + x = self.activate(x) + return x diff --git a/models/pggan_tf_official/LICENSE.txt b/models/pggan_tf_official/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..193f8009295e62dca9d73ff35d84d473c753abeb --- /dev/null +++ b/models/pggan_tf_official/LICENSE.txt @@ -0,0 +1,410 @@ +Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + + +Attribution-NonCommercial 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. 
Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. 
Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + j. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + k. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + l. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. 
Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. 
+ +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. 
+ + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the "Licensor." The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/models/pggan_tf_official/README.md b/models/pggan_tf_official/README.md new file mode 100644 index 0000000000000000000000000000000000000000..49b05b69c57884d1cbe022d3c4dd674973b8fbca --- /dev/null +++ b/models/pggan_tf_official/README.md @@ -0,0 +1,174 @@ +## Progressive Growing of GANs for Improved Quality, Stability, and Variation
– Official TensorFlow implementation of the ICLR 2018 paper + +**Tero Karras** (NVIDIA), **Timo Aila** (NVIDIA), **Samuli Laine** (NVIDIA), **Jaakko Lehtinen** (NVIDIA and Aalto University) + +* For business inquiries, please contact **[researchinquiries@nvidia.com](mailto:researchinquiries@nvidia.com)** +* For press and other inquiries, please contact Hector Marinez at **[hmarinez@nvidia.com](mailto:hmarinez@nvidia.com)** + +![Representative image](https://raw.githubusercontent.com/tkarras/progressive_growing_of_gans/master/representative_image_512x256.png)
+**Picture:** Two imaginary celebrities that were dreamed up by a random number generator. + +**Abstract:**
+*We describe a new training methodology for generative adversarial networks. The key idea is to grow both the generator and discriminator progressively: starting from a low resolution, we add new layers that model increasingly fine details as training progresses. This both speeds the training up and greatly stabilizes it, allowing us to produce images of unprecedented quality, e.g., CelebA images at 1024². We also propose a simple way to increase the variation in generated images, and achieve a record inception score of 8.80 in unsupervised CIFAR10. Additionally, we describe several implementation details that are important for discouraging unhealthy competition between the generator and discriminator. Finally, we suggest a new metric for evaluating GAN results, both in terms of image quality and variation. As an additional contribution, we construct a higher-quality version of the CelebA dataset.* + +## Resources + +* [Paper (NVIDIA research)](http://research.nvidia.com/publication/2017-10_Progressive-Growing-of) +* [Paper (arXiv)](http://arxiv.org/abs/1710.10196) +* [Result video (YouTube)](https://youtu.be/G06dEcZ-QTg) +* [Additional material (Google Drive)](https://drive.google.com/open?id=0B4qLcYyJmiz0NHFULTdYc05lX0U) + * [ICLR 2018 poster (`karras2018iclr-poster.pdf`)](https://drive.google.com/open?id=1ilUVoIejsvG04G0PzFNVn3U3TjSSyHGu) + * [ICLR 2018 slides (`karras2018iclr-slides.pptx`)](https://drive.google.com/open?id=1jYlrX4DgTs2VAfRcyl3pcNI4ONkBg3-g) + * [Representative images (`images/representative-images`)](https://drive.google.com/open?id=0B4qLcYyJmiz0UE9zVHduWFVORlk) + * [High-quality video clips (`videos/high-quality-video-clips`)](https://drive.google.com/open?id=1gQu3O8ZhC-nko8wLFgcNqcwMnRYL_z85) + * [Huge collection of non-curated images for each dataset (`images/100k-generated-images`)](https://drive.google.com/open?id=1j6uZ_a6zci0HyKZdpDq9kSa8VihtEPCp) + * [Extensive video of random interpolations for each dataset (`videos/one-hour-of-random-interpolations`)](https://drive.google.com/open?id=1gAb3oqpaQFHZTwPUXHPIfBIP8eIeWNrI) + * [Pre-trained networks (`networks/tensorflow-version`)](https://drive.google.com/open?id=15hvzxt_XxuokSmj0uO4xxMTMWVc0cIMU) + * [Minimal example script for importing the pre-trained networks (`networks/tensorflow-version/example_import_script`)](https://drive.google.com/open?id=1A79qKDTFp6pExe4gTSgBsEOkxwa2oes_) + * [Data files needed to reconstruct the CelebA-HQ dataset (`datasets/celeba-hq-deltas`)](https://drive.google.com/open?id=0B4qLcYyJmiz0TXY1NG02bzZVRGs) + * [Example training logs and progress snapshots (`networks/tensorflow-version/example_training_runs`)](https://drive.google.com/open?id=1A9SKoQ7Xu2fqK22GHdMw8LZTh6qLvR7H) + +All the material, including source code, is made freely available for non-commercial use under the Creative Commons [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/legalcode) license. Feel free to use any of the material in your own work, as long as you give us appropriate credit by mentioning the title and author list of our paper. + +## Versions + +There are two different versions of the source code. The *TensorFlow version* is newer and more polished, and we generally recommend it as a starting point if you are looking to experiment with our technique, build upon it, or apply it to novel datasets. The *original Theano version*, on the other hand, is what we used to produce all the results shown in our paper. 
We recommend using it if – and only if – you are looking to reproduce our exact results for benchmark datasets like CIFAR-10, MNIST-RGB, and CelebA. + +The main differences are summarized in the following table: + +| Feature | TensorFlow version | Original Theano version | +| :-------------------------------- | :-------------------------------------------: | :-----------------------: | +| Branch | [master](https://github.com/tkarras/progressive_growing_of_gans/tree/master) (this branch) | [original-theano-version](https://github.com/tkarras/progressive_growing_of_gans/tree/original-theano-version) | +| Multi-GPU support | Yes | No | +| FP16 mixed-precision support | Yes | No | +| Performance | High | Low | +| Training time for CelebA-HQ | 2 days (8 GPUs)
2 weeks (1 GPU) | 1–2 months | +| Repro CelebA-HQ results | Yes – very close | Yes – identical | +| Repro LSUN results | Yes – very close | Yes – identical | +| Repro CIFAR-10 results | No | Yes – identical | +| Repro MNIST mode recovery | No | Yes – identical | +| Repro ablation study (Table 1) | No | Yes – identical | +| Dataset format | TFRecords | HDF5 | +| Backwards compatibility | Can import networks
trained with Theano | N/A | +| Code quality | Reasonable | Somewhat messy | +| Code status | In active use | No longer maintained | + +## System requirements + +* Both Linux and Windows are supported, but we strongly recommend Linux for performance and compatibility reasons. +* 64-bit Python 3.6 installation with numpy 1.13.3 or newer. We recommend Anaconda3. +* One or more high-end NVIDIA Pascal or Volta GPUs with 16GB of DRAM. We recommend NVIDIA DGX-1 with 8 Tesla V100 GPUs. +* NVIDIA driver 391.25 or newer, CUDA toolkit 9.0 or newer, cuDNN 7.1.2 or newer. +* Additional Python packages listed in `requirements-pip.txt` + +## Importing and using pre-trained networks + +All pre-trained networks found on Google Drive, as well as ones produced by the training script, are stored as Python PKL files. They can be imported using the standard `pickle` mechanism as long as two conditions are met: (1) The directory containing the Progressive GAN code repository must be included in the PYTHONPATH environment variable, and (2) a `tf.Session()` object must have been created beforehand and set as default. Each PKL file contains 3 instances of `tfutil.Network`: + +``` +# Import official CelebA-HQ networks. +with open('karras2018iclr-celebahq-1024x1024.pkl', 'rb') as file: + G, D, Gs = pickle.load(file) + # G = Instantaneous snapshot of the generator, mainly useful for resuming a previous training run. + # D = Instantaneous snapshot of the discriminator, mainly useful for resuming a previous training run. + # Gs = Long-term average of the generator, yielding higher-quality results than the instantaneous snapshot. +``` + +It is also possible to import networks that were produced using the Theano implementation, as long as they do not employ any features that are not natively supported by the TensorFlow version (minibatch discrimination, batch normalization, etc.). To enable Theano network import, however, you must use `misc.load_pkl()` in place of `pickle.load()`: + +``` +# Import Theano versions of the official CelebA-HQ networks. +import misc +G, D, Gs = misc.load_pkl('200-celebahq-1024x1024/network-final.pkl') +``` + +Once you have imported the networks, you can call `Gs.run()` to produce a set of images for given latent vectors, or `Gs.get_output_for()` to include the generator network in a larger TensorFlow expression. For further details, please consult the example script found on Google Drive. Instructions: + +1. Pull the Progressive GAN code repository and add it to your PYTHONPATH environment variable. +2. Install the required Python packages with `pip install -r requirements-pip.txt` +2. Download [`import_example.py`](https://drive.google.com/open?id=1xZul7DwqqJoe5OCuKHw6fQVeQZNIMSuF) from [`networks/tensorflow-version/example_import_script`](https://drive.google.com/open?id=1A79qKDTFp6pExe4gTSgBsEOkxwa2oes_) +3. Download [`karras2018iclr-celebahq-1024x1024.pkl`](https://drive.google.com/open?id=188K19ucknC6wg1R6jbuPEhTq9zoufOx4) from [`networks/tensorflow-version`](https://drive.google.com/open?id=15hvzxt_XxuokSmj0uO4xxMTMWVc0cIMU) and place it in the same directory as the script. +5. Run the script with `python import_example.py` +6. If everything goes well, the script should generate 10 PNG images (`img0.png` – `img9.png`) that match the ones found in [`networks/tensorflow-version/example_import_script`](https://drive.google.com/open?id=1A79qKDTFp6pExe4gTSgBsEOkxwa2oes_) exactly. 
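+
+For a rough idea of what such a script does, the sketch below generates a few images with `Gs.run()` once the pickle has been loaded as shown above. It is an illustration only, not the official example script: the attribute `Gs.input_shapes` and the approximate [-1, 1] output range are assumptions based on the pre-trained CelebA-HQ network and may differ for other models.
+
+```
+# Minimal sketch: sample latents and run the long-term average generator Gs.
+import numpy as np
+import PIL.Image
+
+# Latent vectors and (empty) labels matching the generator's input shapes.
+latents = np.random.RandomState(1000).randn(10, *Gs.input_shapes[0][1:])  # e.g. (10, 512)
+labels = np.zeros([latents.shape[0]] + Gs.input_shapes[1][1:])            # no labels for unconditional models
+
+# Run the generator and convert NCHW floats in roughly [-1, 1] to uint8 NHWC.
+images = Gs.run(latents, labels)
+images = np.clip(np.rint((images + 1.0) / 2.0 * 255.0), 0.0, 255.0).astype(np.uint8)
+images = images.transpose(0, 2, 3, 1)  # NCHW => NHWC
+
+for i, img in enumerate(images):
+    PIL.Image.fromarray(img, 'RGB').save('img%d.png' % i)
+```
+
+For larger batches or limited GPU memory, splitting the latents into smaller minibatches before calling `Gs.run()` is usually advisable.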
+ +## Preparing datasets for training + +The Progressive GAN code repository contains a command-line tool for recreating bit-exact replicas of the datasets that we used in the paper. The tool also provides various utilities for operating on the datasets: + +``` +usage: dataset_tool.py [-h] ... + + display Display images in dataset. + extract Extract images from dataset. + compare Compare two datasets. + create_mnist Create dataset for MNIST. + create_mnistrgb Create dataset for MNIST-RGB. + create_cifar10 Create dataset for CIFAR-10. + create_cifar100 Create dataset for CIFAR-100. + create_svhn Create dataset for SVHN. + create_lsun Create dataset for single LSUN category. + create_celeba Create dataset for CelebA. + create_celebahq Create dataset for CelebA-HQ. + create_from_images Create dataset from a directory full of images. + create_from_hdf5 Create dataset from legacy HDF5 archive. + +Type "dataset_tool.py -h" for more information. +``` + +The datasets are represented by directories containing the same image data in several resolutions to enable efficient streaming. There is a separate `*.tfrecords` file for each resolution, and if the dataset contains labels, they are stored in a separate file as well: + +``` +> python dataset_tool.py create_cifar10 datasets/cifar10 ~/downloads/cifar10 +> ls -la datasets/cifar10 +drwxr-xr-x 2 user user 7 Feb 21 10:07 . +drwxrwxr-x 10 user user 62 Apr 3 15:10 .. +-rw-r--r-- 1 user user 4900000 Feb 19 13:17 cifar10-r02.tfrecords +-rw-r--r-- 1 user user 12350000 Feb 19 13:17 cifar10-r03.tfrecords +-rw-r--r-- 1 user user 41150000 Feb 19 13:17 cifar10-r04.tfrecords +-rw-r--r-- 1 user user 156350000 Feb 19 13:17 cifar10-r05.tfrecords +-rw-r--r-- 1 user user 2000080 Feb 19 13:17 cifar10-rxx.labels +``` + +The ```create_*``` commands take the standard version of a given dataset as input and produce the corresponding `*.tfrecords` files as output. Additionally, the ```create_celebahq``` command requires a set of data files representing deltas with respect to the original CelebA dataset. These deltas (27.6GB) can be downloaded from [`datasets/celeba-hq-deltas`](https://drive.google.com/open?id=0B4qLcYyJmiz0TXY1NG02bzZVRGs). + +**Note about module versions**: Some of the dataset commands require specific versions of Python modules and system libraries (e.g. pillow, libjpeg), and they will give an error if the versions do not match. Please heed the error messages – there is **no way** to get the commands to work other than installing these specific versions. + +## Training networks + +Once the necessary datasets are set up, you can proceed to train your own networks. The general procedure is as follows: + +1. Edit `config.py` to specify the dataset and training configuration by uncommenting/editing specific lines. +2. Run the training script with `python train.py`. +3. The results are written into a newly created subdirectory under `config.result_dir` +4. Wait several days (or weeks) for the training to converge, and analyze the results. + +By default, `config.py` is configured to train a 1024x1024 network for CelebA-HQ using a single-GPU. This is expected to take about two weeks even on the highest-end NVIDIA GPUs. The key to enabling faster training is to employ multiple GPUs and/or go for a lower-resolution dataset. To this end, `config.py` contains several examples for commonly used datasets, as well as a set of "configuration presets" for multi-GPU training. 
All of the presets are expected to yield roughly the same image quality for CelebA-HQ, but their total training time can vary considerably: + +* `preset-v1-1gpu`: Original config that was used to produce the CelebA-HQ and LSUN results shown in the paper. Expected to take about 1 month on NVIDIA Tesla V100. +* `preset-v2-1gpu`: Optimized config that converges considerably faster than the original one. Expected to take about 2 weeks on 1xV100. +* `preset-v2-2gpus`: Optimized config for 2 GPUs. Takes about 1 week on 2xV100. +* `preset-v2-4gpus`: Optimized config for 4 GPUs. Takes about 3 days on 4xV100. +* `preset-v2-8gpus`: Optimized config for 8 GPUs. Takes about 2 days on 8xV100. + +For reference, the expected output of each configuration preset for CelebA-HQ can be found in [`networks/tensorflow-version/example_training_runs`](https://drive.google.com/open?id=1A9SKoQ7Xu2fqK22GHdMw8LZTh6qLvR7H) + +Other noteworthy config options: + +* `fp16`: Enable [FP16 mixed-precision training](http://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) to reduce the training times even further. The actual speedup is heavily dependent on GPU architecture and cuDNN version, and it can be expected to increase considerably in the future. +* `BENCHMARK`: Quickly iterate through the resolutions to measure the raw training performance. +* `BENCHMARK0`: Same as `BENCHMARK`, but only use the highest resolution. +* `syn1024rgb`: Synthetic 1024x1024 dataset consisting of just black images. Useful for benchmarking. +* `VERBOSE`: Save image and network snapshots very frequently to facilitate debugging. +* `GRAPH` and `HIST`: Include additional data in the TensorBoard report. + +## Analyzing results + +Training results can be analyzed in several ways: + +* **Manual inspection**: The training script saves a snapshot of randomly generated images at regular intervals in `fakes*.png` and reports the overall progress in `log.txt`. +* **TensorBoard**: The training script also exports various running statistics in a `*.tfevents` file that can be visualized in TensorBoard with `tensorboard --logdir `. +* **Generating images and videos**: At the end of `config.py`, there are several pre-defined configs to launch utility scripts (`generate_*`). For example: + * Suppose you have an ongoing training run titled `010-pgan-celebahq-preset-v1-1gpu-fp32`, and you want to generate a video of random interpolations for the latest snapshot. + * Uncomment the `generate_interpolation_video` line in `config.py`, replace `run_id=10`, and run `python train.py` + * The script will automatically locate the latest network snapshot and create a new result directory containing a single MP4 file. +* **Quality metrics**: Similar to the previous example, `config.py` also contains pre-defined configs to compute various quality metrics (Sliced Wasserstein distance, Fréchet inception distance, etc.) for an existing training run. The metrics are computed for each network snapshot in succession and stored in `metric-*.txt` in the original result directory. diff --git a/models/pggan_tf_official/config.py b/models/pggan_tf_official/config.py new file mode 100644 index 0000000000000000000000000000000000000000..10031ac8b2bcc317dcd21cf73bf6539f789f7b91 --- /dev/null +++ b/models/pggan_tf_official/config.py @@ -0,0 +1,140 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. 
To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +#---------------------------------------------------------------------------- +# Convenience class that behaves exactly like dict(), but allows accessing +# the keys and values using the attribute syntax, i.e., "mydict.key = value". + +class EasyDict(dict): + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + def __getattr__(self, name): return self[name] + def __setattr__(self, name, value): self[name] = value + def __delattr__(self, name): del self[name] + +#---------------------------------------------------------------------------- +# Paths. + +data_dir = 'datasets' +result_dir = 'results' + +#---------------------------------------------------------------------------- +# TensorFlow options. + +tf_config = EasyDict() # TensorFlow session config, set by tfutil.init_tf(). +env = EasyDict() # Environment variables, set by the main program in train.py. + +tf_config['graph_options.place_pruned_graph'] = True # False (default) = Check that all ops are available on the designated device. True = Skip the check for ops that are not used. +#tf_config['gpu_options.allow_growth'] = False # False (default) = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed. +#env.CUDA_VISIBLE_DEVICES = '0' # Unspecified (default) = Use all available GPUs. List of ints = CUDA device numbers to use. +env.TF_CPP_MIN_LOG_LEVEL = '1' # 0 (default) = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info. + +#---------------------------------------------------------------------------- +# Official training configs, targeted mainly for CelebA-HQ. +# To run, comment/uncomment the lines as appropriate and launch train.py. + +desc = 'pgan' # Description string included in result subdir name. +random_seed = 1000 # Global random seed. +dataset = EasyDict() # Options for dataset.load_dataset(). +train = EasyDict(func='train.train_progressive_gan') # Options for main training func. +G = EasyDict(func='networks.G_paper') # Options for generator network. +D = EasyDict(func='networks.D_paper') # Options for discriminator network. +G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer. +D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer. +G_loss = EasyDict(func='loss.G_wgan_acgan') # Options for generator loss. +D_loss = EasyDict(func='loss.D_wgangp_acgan') # Options for discriminator loss. +sched = EasyDict() # Options for train.TrainingSchedule. +grid = EasyDict(size='1080p', layout='random') # Options for train.setup_snapshot_image_grid(). + +# Dataset (choose one). 
+desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True +#desc += '-celeba'; dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True +#desc += '-cifar10'; dataset = EasyDict(tfrecord_dir='cifar10') +#desc += '-cifar100'; dataset = EasyDict(tfrecord_dir='cifar100') +#desc += '-svhn'; dataset = EasyDict(tfrecord_dir='svhn') +#desc += '-mnist'; dataset = EasyDict(tfrecord_dir='mnist') +#desc += '-mnistrgb'; dataset = EasyDict(tfrecord_dir='mnistrgb') +#desc += '-syn1024rgb'; dataset = EasyDict(class_name='dataset.SyntheticDataset', resolution=1024, num_channels=3) +#desc += '-lsun-airplane'; dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True +#desc += '-lsun-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True +#desc += '-lsun-bicycle'; dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True +#desc += '-lsun-bird'; dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True +#desc += '-lsun-boat'; dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True +#desc += '-lsun-bottle'; dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True +#desc += '-lsun-bridge'; dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True +#desc += '-lsun-bus'; dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True +#desc += '-lsun-car'; dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True +#desc += '-lsun-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True +#desc += '-lsun-chair'; dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True +#desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True +#desc += '-lsun-classroom'; dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True +#desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True +#desc += '-lsun-cow'; dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True +#desc += '-lsun-diningroom'; dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True +#desc += '-lsun-diningtable'; dataset = EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True +#desc += '-lsun-dog'; dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True +#desc += '-lsun-horse'; dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True +#desc += '-lsun-kitchen'; dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True +#desc += '-lsun-livingroom'; dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True +#desc += '-lsun-motorbike'; dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True +#desc += '-lsun-person'; dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True +#desc += '-lsun-pottedplant'; dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True +#desc += '-lsun-restaurant'; dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True +#desc += '-lsun-sheep'; dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True +#desc += '-lsun-sofa'; dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = 
True +#desc += '-lsun-tower'; dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True +#desc += '-lsun-train'; dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True +#desc += '-lsun-tvmonitor'; dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True + +# Conditioning & snapshot options. +#desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label +#desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label +#desc += '-g4k'; grid.size = '4k' +#desc += '-grpc'; grid.layout = 'row_per_class' + +# Config presets (choose one). +#desc += '-preset-v1-1gpu'; num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000 +desc += '-preset-v2-1gpu'; num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 +#desc += '-preset-v2-2gpus'; num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 +#desc += '-preset-v2-4gpus'; num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 +#desc += '-preset-v2-8gpus'; num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 + +# Numerical precision (choose one). +desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4} +#desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8} + +# Disable individual features. +#desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000 +#desc += '-nopixelnorm'; G.use_pixelnorm = False +#desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False +#desc += '-noleakyrelu'; G.use_leakyrelu = False +#desc += '-nosmoothing'; train.G_smoothing = 0.0 +#desc += '-norepeat'; train.minibatch_repeats = 1 +#desc += '-noreset'; train.reset_opt_for_new_lod = False + +# Special modes. 
+#desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000 +#desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000 +#desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100 +#desc += '-GRAPH'; train.save_tf_graph = True +#desc += '-HIST'; train.save_weight_histograms = True + +#---------------------------------------------------------------------------- +# Utility scripts. +# To run, uncomment the appropriate line and launch train.py. + +#train = EasyDict(func='util_scripts.generate_fake_images', run_id=23, num_pngs=1000); num_gpus = 1; desc = 'fake-images-' + str(train.run_id) +#train = EasyDict(func='util_scripts.generate_fake_images', run_id=23, grid_size=[15,8], num_pngs=10, image_shrink=4); num_gpus = 1; desc = 'fake-grids-' + str(train.run_id) +#train = EasyDict(func='util_scripts.generate_interpolation_video', run_id=23, grid_size=[1,1], duration_sec=60.0, smoothing_sec=1.0); num_gpus = 1; desc = 'interpolation-video-' + str(train.run_id) +#train = EasyDict(func='util_scripts.generate_training_video', run_id=23, duration_sec=20.0); num_gpus = 1; desc = 'training-video-' + str(train.run_id) + +#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-swd-16k.txt', metrics=['swd'], num_images=16384, real_passes=2); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id) +#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-fid-10k.txt', metrics=['fid'], num_images=10000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id) +#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-fid-50k.txt', metrics=['fid'], num_images=50000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id) +#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-is-50k.txt', metrics=['is'], num_images=50000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id) +#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-msssim-20k.txt', metrics=['msssim'], num_images=20000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id) + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/dataset.py b/models/pggan_tf_official/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a56c236a05faff1098aad2910e56decd4a63faf9 --- /dev/null +++ b/models/pggan_tf_official/dataset.py @@ -0,0 +1,241 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
+ +import os +import glob +import numpy as np +import tensorflow as tf +import tfutil + +#---------------------------------------------------------------------------- +# Parse individual image from a tfrecords file. + +def parse_tfrecord_tf(record): + features = tf.parse_single_example(record, features={ + 'shape': tf.FixedLenFeature([3], tf.int64), + 'data': tf.FixedLenFeature([], tf.string)}) + data = tf.decode_raw(features['data'], tf.uint8) + return tf.reshape(data, features['shape']) + +def parse_tfrecord_np(record): + ex = tf.train.Example() + ex.ParseFromString(record) + shape = ex.features.feature['shape'].int64_list.value + data = ex.features.feature['data'].bytes_list.value[0] + return np.fromstring(data, np.uint8).reshape(shape) + +#---------------------------------------------------------------------------- +# Dataset class that loads data from tfrecords files. + +class TFRecordDataset: + def __init__(self, + tfrecord_dir, # Directory containing a collection of tfrecords files. + resolution = None, # Dataset resolution, None = autodetect. + label_file = None, # Relative path of the labels file, None = autodetect. + max_label_size = 0, # 0 = no labels, 'full' = full labels, = N first label components. + repeat = True, # Repeat dataset indefinitely. + shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling. + prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching. + buffer_mb = 256, # Read buffer size (megabytes). + num_threads = 2): # Number of concurrent threads. + + self.tfrecord_dir = tfrecord_dir + self.resolution = None + self.resolution_log2 = None + self.shape = [] # [channel, height, width] + self.dtype = 'uint8' + self.dynamic_range = [0, 255] + self.label_file = label_file + self.label_size = None # [component] + self.label_dtype = None + self._np_labels = None + self._tf_minibatch_in = None + self._tf_labels_var = None + self._tf_labels_dataset = None + self._tf_datasets = dict() + self._tf_iterator = None + self._tf_init_ops = dict() + self._tf_minibatch_np = None + self._cur_minibatch = -1 + self._cur_lod = -1 + + # List tfrecords files and inspect their shapes. + assert os.path.isdir(self.tfrecord_dir) + tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords'))) + assert len(tfr_files) >= 1 + tfr_shapes = [] + for tfr_file in tfr_files: + tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE) + for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt): + tfr_shapes.append(parse_tfrecord_np(record).shape) + break + + # Autodetect label filename. + if self.label_file is None: + guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels'))) + if len(guess): + self.label_file = guess[0] + elif not os.path.isfile(self.label_file): + guess = os.path.join(self.tfrecord_dir, self.label_file) + if os.path.isfile(guess): + self.label_file = guess + + # Determine shape and resolution. 
+ max_shape = max(tfr_shapes, key=lambda shape: np.prod(shape)) + self.resolution = resolution if resolution is not None else max_shape[1] + self.resolution_log2 = int(np.log2(self.resolution)) + self.shape = [max_shape[0], self.resolution, self.resolution] + tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes] + assert all(shape[0] == max_shape[0] for shape in tfr_shapes) + assert all(shape[1] == shape[2] for shape in tfr_shapes) + assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods)) + assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1)) + + # Load labels. + assert max_label_size == 'full' or max_label_size >= 0 + self._np_labels = np.zeros([1<<20, 0], dtype=np.float32) + if self.label_file is not None and max_label_size != 0: + self._np_labels = np.load(self.label_file) + assert self._np_labels.ndim == 2 + if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size: + self._np_labels = self._np_labels[:, :max_label_size] + self.label_size = self._np_labels.shape[1] + self.label_dtype = self._np_labels.dtype.name + + # Build TF expressions. + with tf.name_scope('Dataset'), tf.device('/cpu:0'): + self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[]) + tf_labels_init = tf.zeros(self._np_labels.shape, self._np_labels.dtype) + self._tf_labels_var = tf.Variable(tf_labels_init, name='labels_var') + tfutil.set_vars({self._tf_labels_var: self._np_labels}) + self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var) + for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods): + if tfr_lod < 0: + continue + dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20) + dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads) + dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset)) + bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize + if shuffle_mb > 0: + dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1) + if repeat: + dset = dset.repeat() + if prefetch_mb > 0: + dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1) + dset = dset.batch(self._tf_minibatch_in) + self._tf_datasets[tfr_lod] = dset + self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes) + self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()} + + # Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf(). + def configure(self, minibatch_size, lod=0): + lod = int(np.floor(lod)) + assert minibatch_size >= 1 and lod in self._tf_datasets + if self._cur_minibatch != minibatch_size or self._cur_lod != lod: + self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size}) + self._cur_minibatch = minibatch_size + self._cur_lod = lod + + # Get next minibatch as TensorFlow expressions. + def get_minibatch_tf(self): # => images, labels + return self._tf_iterator.get_next() + + # Get next minibatch as NumPy arrays. + def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels + self.configure(minibatch_size, lod) + if self._tf_minibatch_np is None: + self._tf_minibatch_np = self.get_minibatch_tf() + return tfutil.run(self._tf_minibatch_np) + + # Get random labels as TensorFlow expression. 
+ def get_random_labels_tf(self, minibatch_size): # => labels + if self.label_size > 0: + return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32)) + else: + return tf.zeros([minibatch_size, 0], self.label_dtype) + + # Get random labels as NumPy array. + def get_random_labels_np(self, minibatch_size): # => labels + if self.label_size > 0: + return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])] + else: + return np.zeros([minibatch_size, 0], self.label_dtype) + +#---------------------------------------------------------------------------- +# Base class for datasets that are generated on the fly. + +class SyntheticDataset: + def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'): + self.resolution = resolution + self.resolution_log2 = int(np.log2(resolution)) + self.shape = [num_channels, resolution, resolution] + self.dtype = dtype + self.dynamic_range = dynamic_range + self.label_size = label_size + self.label_dtype = label_dtype + self._tf_minibatch_var = None + self._tf_lod_var = None + self._tf_minibatch_np = None + self._tf_labels_np = None + + assert self.resolution == 2 ** self.resolution_log2 + with tf.name_scope('Dataset'): + self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var') + self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var') + + def configure(self, minibatch_size, lod=0): + lod = int(np.floor(lod)) + assert minibatch_size >= 1 and lod >= 0 and lod <= self.resolution_log2 + tfutil.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod}) + + def get_minibatch_tf(self): # => images, labels + with tf.name_scope('SyntheticDataset'): + shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32) + shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink] + images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape) + labels = self._generate_labels(self._tf_minibatch_var) + return images, labels + + def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels + self.configure(minibatch_size, lod) + if self._tf_minibatch_np is None: + self._tf_minibatch_np = self.get_minibatch_tf() + return tfutil.run(self._tf_minibatch_np) + + def get_random_labels_tf(self, minibatch_size): # => labels + with tf.name_scope('SyntheticDataset'): + return self._generate_labels(minibatch_size) + + def get_random_labels_np(self, minibatch_size): # => labels + self.configure(minibatch_size) + if self._tf_labels_np is None: + self._tf_labels_np = self.get_random_labels_tf() + return tfutil.run(self._tf_labels_np) + + def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses + return tf.zeros([minibatch] + shape, self.dtype) + + def _generate_labels(self, minibatch): # to be overridden by subclasses + return tf.zeros([minibatch, self.label_size], self.label_dtype) + +#---------------------------------------------------------------------------- +# Helper func for constructing a dataset object using the given options. + +def load_dataset(class_name='dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs): + adjusted_kwargs = dict(kwargs) + if 'tfrecord_dir' in adjusted_kwargs and data_dir is not None: + adjusted_kwargs['tfrecord_dir'] = os.path.join(data_dir, adjusted_kwargs['tfrecord_dir']) + if verbose: + print('Streaming data using %s...' 
% class_name) + dataset = tfutil.import_obj(class_name)(**adjusted_kwargs) + if verbose: + print('Dataset shape =', np.int32(dataset.shape).tolist()) + print('Dynamic range =', dataset.dynamic_range) + print('Label size =', dataset.label_size) + return dataset + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/dataset_tool.py b/models/pggan_tf_official/dataset_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..f7861cb79fab70fa8060554a17b8e1553310381e --- /dev/null +++ b/models/pggan_tf_official/dataset_tool.py @@ -0,0 +1,740 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import os +import sys +import glob +import argparse +import threading +import six.moves.queue as Queue +import traceback +import numpy as np +import tensorflow as tf +import PIL.Image + +import tfutil +import dataset + +#---------------------------------------------------------------------------- + +def error(msg): + print('Error: ' + msg) + exit(1) + +#---------------------------------------------------------------------------- + +class TFRecordExporter: + def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10): + self.tfrecord_dir = tfrecord_dir + self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir)) + self.expected_images = expected_images + self.cur_images = 0 + self.shape = None + self.resolution_log2 = None + self.tfr_writers = [] + self.print_progress = print_progress + self.progress_interval = progress_interval + if self.print_progress: + print('Creating dataset "%s"' % tfrecord_dir) + if not os.path.isdir(self.tfrecord_dir): + os.makedirs(self.tfrecord_dir) + assert(os.path.isdir(self.tfrecord_dir)) + + def close(self): + if self.print_progress: + print('%-40s\r' % 'Flushing data...', end='', flush=True) + for tfr_writer in self.tfr_writers: + tfr_writer.close() + self.tfr_writers = [] + if self.print_progress: + print('%-40s\r' % '', end='', flush=True) + print('Added %d images.' % self.cur_images) + + def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order. 
+ order = np.arange(self.expected_images) + np.random.RandomState(123).shuffle(order) + return order + + def add_image(self, img): + if self.print_progress and self.cur_images % self.progress_interval == 0: + print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True) + if self.shape is None: + self.shape = img.shape + self.resolution_log2 = int(np.log2(self.shape[1])) + assert self.shape[0] in [1, 3] + assert self.shape[1] == self.shape[2] + assert self.shape[1] == 2**self.resolution_log2 + tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE) + for lod in range(self.resolution_log2 - 1): + tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod) + self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt)) + assert img.shape == self.shape + for lod, tfr_writer in enumerate(self.tfr_writers): + if lod: + img = img.astype(np.float32) + img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25 + quant = np.rint(img).clip(0, 255).astype(np.uint8) + ex = tf.train.Example(features=tf.train.Features(feature={ + 'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)), + 'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))})) + tfr_writer.write(ex.SerializeToString()) + self.cur_images += 1 + + def add_labels(self, labels): + if self.print_progress: + print('%-40s\r' % 'Saving labels...', end='', flush=True) + assert labels.shape[0] == self.cur_images + with open(self.tfr_prefix + '-rxx.labels', 'wb') as f: + np.save(f, labels.astype(np.float32)) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + +#---------------------------------------------------------------------------- + +class ExceptionInfo(object): + def __init__(self): + self.value = sys.exc_info()[1] + self.traceback = traceback.format_exc() + +#---------------------------------------------------------------------------- + +class WorkerThread(threading.Thread): + def __init__(self, task_queue): + threading.Thread.__init__(self) + self.task_queue = task_queue + + def run(self): + while True: + func, args, result_queue = self.task_queue.get() + if func is None: + break + try: + result = func(*args) + except: + result = ExceptionInfo() + result_queue.put((result, args)) + +#---------------------------------------------------------------------------- + +class ThreadPool(object): + def __init__(self, num_threads): + assert num_threads >= 1 + self.task_queue = Queue.Queue() + self.result_queues = dict() + self.num_threads = num_threads + for idx in range(self.num_threads): + thread = WorkerThread(self.task_queue) + thread.daemon = True + thread.start() + + def add_task(self, func, args=()): + assert hasattr(func, '__call__') # must be a function + if func not in self.result_queues: + self.result_queues[func] = Queue.Queue() + self.task_queue.put((func, args, self.result_queues[func])) + + def get_result(self, func): # returns (result, args) + result, args = self.result_queues[func].get() + if isinstance(result, ExceptionInfo): + print('\n\nWorker thread caught an exception:\n' + result.traceback) + raise result.value + return result, args + + def finish(self): + for idx in range(self.num_threads): + self.task_queue.put((None, (), None)) + + def __enter__(self): # for 'with' statement + return self + + def __exit__(self, *excinfo): + self.finish() + + def process_items_concurrently(self, item_iterator, process_func=lambda x: x, 
pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None): + if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4 + assert max_items_in_flight >= 1 + results = [] + retire_idx = [0] + + def task_func(prepared, idx): + return process_func(prepared) + + def retire_result(): + processed, (prepared, idx) = self.get_result(task_func) + results[idx] = processed + while retire_idx[0] < len(results) and results[retire_idx[0]] is not None: + yield post_func(results[retire_idx[0]]) + results[retire_idx[0]] = None + retire_idx[0] += 1 + + for idx, item in enumerate(item_iterator): + prepared = pre_func(item) + results.append(None) + self.add_task(func=task_func, args=(prepared, idx)) + while retire_idx[0] < idx - max_items_in_flight + 2: + for res in retire_result(): yield res + while retire_idx[0] < len(results): + for res in retire_result(): yield res + +#---------------------------------------------------------------------------- + +def display(tfrecord_dir): + print('Loading dataset "%s"' % tfrecord_dir) + tfutil.init_tf({'gpu_options.allow_growth': True}) + dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0) + tfutil.init_uninited_vars() + + idx = 0 + while True: + try: + images, labels = dset.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + break + if idx == 0: + print('Displaying images') + import cv2 # pip install opencv-python + cv2.namedWindow('dataset_tool') + print('Press SPACE or ENTER to advance, ESC to exit') + print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist())) + cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR + idx += 1 + if cv2.waitKey() == 27: + break + print('\nDisplayed %d images.' % idx) + +#---------------------------------------------------------------------------- + +def extract(tfrecord_dir, output_dir): + print('Loading dataset "%s"' % tfrecord_dir) + tfutil.init_tf({'gpu_options.allow_growth': True}) + dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0) + tfutil.init_uninited_vars() + + print('Extracting images to "%s"' % output_dir) + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + idx = 0 + while True: + if idx % 10 == 0: + print('%d\r' % idx, end='', flush=True) + try: + images, labels = dset.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + break + if images.shape[1] == 1: + img = PIL.Image.fromarray(images[0][0], 'L') + else: + img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB') + img.save(os.path.join(output_dir, 'img%08d.png' % idx)) + idx += 1 + print('Extracted %d images.' 
% idx) + +#---------------------------------------------------------------------------- + +def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels): + max_label_size = 0 if ignore_labels else 'full' + print('Loading dataset "%s"' % tfrecord_dir_a) + tfutil.init_tf({'gpu_options.allow_growth': True}) + dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0) + print('Loading dataset "%s"' % tfrecord_dir_b) + dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0) + tfutil.init_uninited_vars() + + print('Comparing datasets') + idx = 0 + identical_images = 0 + identical_labels = 0 + while True: + if idx % 100 == 0: + print('%d\r' % idx, end='', flush=True) + try: + images_a, labels_a = dset_a.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + images_a, labels_a = None, None + try: + images_b, labels_b = dset_b.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + images_b, labels_b = None, None + if images_a is None or images_b is None: + if images_a is not None or images_b is not None: + print('Datasets contain different number of images') + break + if images_a.shape == images_b.shape and np.all(images_a == images_b): + identical_images += 1 + else: + print('Image %d is different' % idx) + if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b): + identical_labels += 1 + else: + print('Label %d is different' % idx) + idx += 1 + print('Identical images: %d / %d' % (identical_images, idx)) + if not ignore_labels: + print('Identical labels: %d / %d' % (identical_labels, idx)) + +#---------------------------------------------------------------------------- + +def create_mnist(tfrecord_dir, mnist_dir): + print('Loading MNIST from "%s"' % mnist_dir) + import gzip + with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file: + images = np.frombuffer(file.read(), np.uint8, offset=16) + with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file: + labels = np.frombuffer(file.read(), np.uint8, offset=8) + images = images.reshape(-1, 1, 28, 28) + images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0) + assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (60000,) and labels.dtype == np.uint8 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123): + print('Loading MNIST from "%s"' % mnist_dir) + import gzip + with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file: + images = np.frombuffer(file.read(), np.uint8, offset=16) + images = images.reshape(-1, 28, 28) + images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0) + assert images.shape == (60000, 32, 32) and images.dtype == np.uint8 + assert np.min(images) == 0 and np.max(images) == 255 + + with TFRecordExporter(tfrecord_dir, num_images) as tfr: + rnd = np.random.RandomState(random_seed) + for idx in range(num_images): + 
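+            # Each composite sample indexes three random MNIST digits at once, giving a
+            # (3, 32, 32) array whose R, G, B channels are independent grayscale digits.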
tfr.add_image(images[rnd.randint(images.shape[0], size=3)]) + +#---------------------------------------------------------------------------- + +def create_cifar10(tfrecord_dir, cifar10_dir): + print('Loading CIFAR-10 from "%s"' % cifar10_dir) + import pickle + images = [] + labels = [] + for batch in range(1, 6): + with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file: + data = pickle.load(file, encoding='latin1') + images.append(data['data'].reshape(-1, 3, 32, 32)) + labels.append(data['labels']) + images = np.concatenate(images) + labels = np.concatenate(labels) + assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (50000,) and labels.dtype == np.int32 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_cifar100(tfrecord_dir, cifar100_dir): + print('Loading CIFAR-100 from "%s"' % cifar100_dir) + import pickle + with open(os.path.join(cifar100_dir, 'train'), 'rb') as file: + data = pickle.load(file, encoding='latin1') + images = data['data'].reshape(-1, 3, 32, 32) + labels = np.array(data['fine_labels']) + assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (50000,) and labels.dtype == np.int32 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 99 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_svhn(tfrecord_dir, svhn_dir): + print('Loading SVHN from "%s"' % svhn_dir) + import pickle + images = [] + labels = [] + for batch in range(1, 4): + with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file: + data = pickle.load(file, encoding='latin1') + images.append(data[0]) + labels.append(data[1]) + images = np.concatenate(images) + labels = np.concatenate(labels) + assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (73257,) and labels.dtype == np.uint8 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None): + print('Loading LSUN dataset from "%s"' % lmdb_dir) + import lmdb # pip install lmdb + import cv2 # pip install opencv-python + import io + with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn: + 
total_images = txn.stat()['entries'] + if max_images is None: + max_images = total_images + with TFRecordExporter(tfrecord_dir, max_images) as tfr: + for idx, (key, value) in enumerate(txn.cursor()): + try: + try: + img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1) + if img is None: + raise IOError('cv2.imdecode failed') + img = img[:, :, ::-1] # BGR => RGB + except IOError: + img = np.asarray(PIL.Image.open(io.BytesIO(value))) + crop = np.min(img.shape[:2]) + img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2] + img = PIL.Image.fromarray(img, 'RGB') + img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS) + img = np.asarray(img) + img = img.transpose(2, 0, 1) # HWC => CHW + tfr.add_image(img) + except: + print(sys.exc_info()[1]) + if tfr.cur_images == max_images: + break + +#---------------------------------------------------------------------------- + +def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121): + print('Loading CelebA from "%s"' % celeba_dir) + glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png') + image_filenames = sorted(glob.glob(glob_pattern)) + expected_images = 202599 + if len(image_filenames) != expected_images: + error('Expected to find %d images' % expected_images) + + with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + img = np.asarray(PIL.Image.open(image_filenames[order[idx]])) + assert img.shape == (218, 178, 3) + img = img[cy - 64 : cy + 64, cx - 64 : cx + 64] + img = img.transpose(2, 0, 1) # HWC => CHW + tfr.add_image(img) + +#---------------------------------------------------------------------------- + +def create_celebahq(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100): + print('Loading CelebA from "%s"' % celeba_dir) + expected_images = 202599 + if len(glob.glob(os.path.join(celeba_dir, 'img_celeba', '*.jpg'))) != expected_images: + error('Expected to find %d images' % expected_images) + with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file: + landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]] + landmarks = np.float32(landmarks).reshape(-1, 5, 2) + + print('Loading CelebA-HQ deltas from "%s"' % delta_dir) + import scipy.ndimage + import hashlib + import bz2 + import zipfile + import base64 + import cryptography.hazmat.primitives.hashes + import cryptography.hazmat.backends + import cryptography.hazmat.primitives.kdf.pbkdf2 + import cryptography.fernet + expected_zips = 30 + if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips: + error('Expected to find %d zips' % expected_zips) + with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file: + lines = [line.split() for line in file] + fields = dict() + for idx, field in enumerate(lines[0]): + type = int if field.endswith('idx') else str + fields[field] = [type(line[idx]) for line in lines[1:]] + indices = np.array(fields['idx']) + + # Must use pillow version 3.1.1 for everything to work correctly. + if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1': + error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1 + + # Must use libjpeg version 8d for everything to work correctly. 
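+    # The check below decodes the first CelebA JPEG and compares the MD5 of its pixels
+    # against a known-good value: a different libjpeg version decodes slightly different
+    # pixels, which would break the per-image proc_md5 / final_md5 assertions further down.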
+ img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'img_celeba', '000001.jpg'))) + md5 = hashlib.md5() + md5.update(img.tobytes()) + if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3': + error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d + + def rot90(v): + return np.array([-v[1], v[0]]) + + def process_func(idx): + # Load original image. + orig_idx = fields['orig_idx'][idx] + orig_file = fields['orig_file'][idx] + orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file) + img = PIL.Image.open(orig_path) + + # Choose oriented crop rectangle. + lm = landmarks[orig_idx] + eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5 + mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5 + eye_to_eye = lm[1] - lm[0] + eye_to_mouth = mouth_avg - eye_avg + x = eye_to_eye - rot90(eye_to_mouth) + x /= np.hypot(*x) + x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) + y = rot90(x) + c = eye_avg + eye_to_mouth * 0.1 + quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) + zoom = 1024 / (np.hypot(*x) * 2) + + # Shrink. + shrink = int(np.floor(0.5 / zoom)) + if shrink > 1: + size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink))) + img = img.resize(size, PIL.Image.ANTIALIAS) + quad /= shrink + zoom *= shrink + + # Crop. + border = max(int(np.round(1024 * 0.1 / zoom)), 3) + crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1])))) + crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1])) + if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: + img = img.crop(crop) + quad -= crop[0:2] + + # Simulate super-resolution. + superres = int(np.exp2(np.ceil(np.log2(zoom)))) + if superres > 1: + img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS) + quad *= superres + zoom /= superres + + # Pad. + pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1])))) + pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0)) + if max(pad) > border - 4: + pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom))) + img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') + h, w, _ = img.shape + y, x, _ = np.mgrid[:h, :w, :1] + mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3])) + blur = 1024 * 0.02 / zoom + img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) + img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0) + img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB') + quad += pad[0:2] + + # Transform. + img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) + img = img.resize((1024, 1024), PIL.Image.ANTIALIAS) + img = np.asarray(img).transpose(2, 0, 1) + + # Verify MD5. + md5 = hashlib.md5() + md5.update(img.tobytes()) + assert md5.hexdigest() == fields['proc_md5'][idx] + + # Load delta image and original JPG. 
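+        # Deltas are packed 1000 per archive, so idx is rounded down to the nearest 1000
+        # to select the zip, and the per-image delta%05d.dat entry is read from it.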
+ with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip: + delta_bytes = zip.read('delta%05d.dat' % idx) + with open(orig_path, 'rb') as file: + orig_bytes = file.read() + + # Decrypt delta image, using original JPG data as decryption key. + algorithm = cryptography.hazmat.primitives.hashes.SHA256() + backend = cryptography.hazmat.backends.default_backend() + salt = bytes(orig_file, 'ascii') + kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend) + key = base64.urlsafe_b64encode(kdf.derive(orig_bytes)) + delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024) + + # Apply delta image. + img = img + delta + + # Verify MD5. + md5 = hashlib.md5() + md5.update(img.tobytes()) + assert md5.hexdigest() == fields['final_md5'][idx] + return img + + with TFRecordExporter(tfrecord_dir, indices.size) as tfr: + order = tfr.choose_shuffled_order() + with ThreadPool(num_threads) as pool: + for img in pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks): + tfr.add_image(img) + +#---------------------------------------------------------------------------- + +def create_from_images(tfrecord_dir, image_dir, shuffle): + print('Loading images from "%s"' % image_dir) + image_filenames = sorted(glob.glob(os.path.join(image_dir, '*'))) + if len(image_filenames) == 0: + error('No input images found') + + img = np.asarray(PIL.Image.open(image_filenames[0])) + resolution = img.shape[0] + channels = img.shape[2] if img.ndim == 3 else 1 + if img.shape[1] != resolution: + error('Input images must have the same width and height') + if resolution != 2 ** int(np.floor(np.log2(resolution))): + error('Input image resolution must be a power-of-two') + if channels not in [1, 3]: + error('Input images must be stored as RGB or grayscale') + + with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr: + order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames)) + for idx in range(order.size): + img = np.asarray(PIL.Image.open(image_filenames[order[idx]])) + if channels == 1: + img = img[np.newaxis, :, :] # HW => CHW + else: + img = img.transpose(2, 0, 1) # HWC => CHW + tfr.add_image(img) + +#---------------------------------------------------------------------------- + +def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle): + print('Loading HDF5 archive from "%s"' % hdf5_filename) + import h5py # conda install h5py + with h5py.File(hdf5_filename, 'r') as hdf5_file: + hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3]) + with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr: + order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0]) + for idx in range(order.size): + tfr.add_image(hdf5_data[order[idx]]) + npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy' + if os.path.isfile(npy_filename): + tfr.add_labels(np.load(npy_filename)[order]) + +#---------------------------------------------------------------------------- + +def execute_cmdline(argv): + prog = argv[0] + parser = argparse.ArgumentParser( + prog = prog, + description = 'Tool for creating, extracting, and visualizing Progressive GAN datasets.', + epilog = 'Type "%s -h" for more information.' 
% prog) + + subparsers = parser.add_subparsers(dest='command') + subparsers.required = True + def add_command(cmd, desc, example=None): + epilog = 'Example: %s %s' % (prog, example) if example is not None else None + return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog) + + p = add_command( 'display', 'Display images in dataset.', + 'display datasets/mnist') + p.add_argument( 'tfrecord_dir', help='Directory containing dataset') + + p = add_command( 'extract', 'Extract images from dataset.', + 'extract datasets/mnist mnist-images') + p.add_argument( 'tfrecord_dir', help='Directory containing dataset') + p.add_argument( 'output_dir', help='Directory to extract the images into') + + p = add_command( 'compare', 'Compare two datasets.', + 'compare datasets/mydataset datasets/mnist') + p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset') + p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset') + p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0) + + p = add_command( 'create_mnist', 'Create dataset for MNIST.', + 'create_mnist datasets/mnist ~/downloads/mnist') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'mnist_dir', help='Directory containing MNIST') + + p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.', + 'create_mnistrgb datasets/mnistrgb ~/downloads/mnist') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'mnist_dir', help='Directory containing MNIST') + p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000) + p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123) + + p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.', + 'create_cifar10 datasets/cifar10 ~/downloads/cifar10') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10') + + p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.', + 'create_cifar100 datasets/cifar100 ~/downloads/cifar100') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100') + + p = add_command( 'create_svhn', 'Create dataset for SVHN.', + 'create_svhn datasets/svhn ~/downloads/svhn') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'svhn_dir', help='Directory containing SVHN') + + p = add_command( 'create_lsun', 'Create dataset for single LSUN category.', + 'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'lmdb_dir', help='Directory containing LMDB database') + p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256) + p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None) + + p = add_command( 'create_celeba', 'Create dataset for CelebA.', + 'create_celeba datasets/celeba ~/downloads/celeba') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'celeba_dir', help='Directory containing CelebA') + p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89) + p.add_argument( 
'--cy', help='Center Y coordinate (default: 121)', type=int, default=121) + + p = add_command( 'create_celebahq', 'Create dataset for CelebA-HQ.', + 'create_celebahq datasets/celebahq ~/downloads/celeba ~/downloads/celeba-hq-deltas') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'celeba_dir', help='Directory containing CelebA') + p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas') + p.add_argument( '--num_threads', help='Number of concurrent threads (default: 4)', type=int, default=4) + p.add_argument( '--num_tasks', help='Number of concurrent processing tasks (default: 100)', type=int, default=100) + + p = add_command( 'create_from_images', 'Create dataset from a directory full of images.', + 'create_from_images datasets/mydataset myimagedir') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'image_dir', help='Directory containing the images') + p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1) + + p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.', + 'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images') + p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1) + + args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h']) + func = globals()[args.command] + del args.command + func(**vars(args)) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + execute_cmdline(sys.argv) + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/legacy.py b/models/pggan_tf_official/legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..ebce17987b5515fad02c310c0e1c7565942c80ea --- /dev/null +++ b/models/pggan_tf_official/legacy.py @@ -0,0 +1,117 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import pickle +import inspect +import numpy as np + +import tfutil +import networks + +#---------------------------------------------------------------------------- +# Custom unpickler that is able to load network pickles produced by +# the old Theano implementation. + +class LegacyUnpickler(pickle.Unpickler): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def find_class(self, module, name): + if module == 'network' and name == 'Network': + return tfutil.Network + return super().find_class(module, name) + +#---------------------------------------------------------------------------- +# Import handler for tfutil.Network that silently converts networks produced +# by the old Theano implementation to a suitable format. 
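+# The remap table below maps old Theano build-function names onto the current TF
+# builders; patch_theano_gan() then rewrites the pickled state into the 'version 2'
+# layout (build_func_name, static_kwargs, variables) expected by tfutil.Network,
+# transposing/flipping convolution kernels into the TF layout and rescaling weights so
+# that the TF networks' runtime weight scaling reproduces the old effective weights.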
+ +theano_gan_remap = { + 'G_paper': 'G_paper', + 'G_progressive_8': 'G_paper', + 'D_paper': 'D_paper', + 'D_progressive_8': 'D_paper'} + +def patch_theano_gan(state): + if 'version' in state or state['build_func_spec']['func'] not in theano_gan_remap: + return state + + spec = dict(state['build_func_spec']) + func = spec.pop('func') + resolution = spec.get('resolution', 32) + resolution_log2 = int(np.log2(resolution)) + use_wscale = spec.get('use_wscale', True) + + assert spec.pop('label_size', 0) == 0 + assert spec.pop('use_batchnorm', False) == False + assert spec.pop('tanh_at_end', None) is None + assert spec.pop('mbstat_func', 'Tstdeps') == 'Tstdeps' + assert spec.pop('mbstat_avg', 'all') == 'all' + assert spec.pop('mbdisc_kernels', None) is None + spec.pop( 'use_gdrop', True) # doesn't make a difference + assert spec.pop('use_layernorm', False) == False + spec[ 'fused_scale'] = False + spec[ 'mbstd_group_size'] = 16 + + vars = [] + param_iter = iter(state['param_values']) + relu = np.sqrt(2); linear = 1.0 + def flatten2(w): return w.reshape(w.shape[0], -1) + def he_std(gain, w): return gain / np.sqrt(np.prod(w.shape[:-1])) + def wscale(gain, w): return w * next(param_iter) / he_std(gain, w) if use_wscale else w + def layer(name, gain, w): return [(name + '/weight', wscale(gain, w)), (name + '/bias', next(param_iter))] + + if func.startswith('G'): + vars += layer('4x4/Dense', relu/4, flatten2(next(param_iter).transpose(1,0,2,3))) + vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1]) + for res in range(3, resolution_log2 + 1): + vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1]) + vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1]) + for lod in range(0, resolution_log2 - 1): + vars += layer('ToRGB_lod%d' % lod, linear, next(param_iter)[np.newaxis, np.newaxis]) + + if func.startswith('D'): + vars += layer('FromRGB_lod0', relu, next(param_iter)[np.newaxis, np.newaxis]) + for res in range(resolution_log2, 2, -1): + vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1]) + vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1]) + vars += layer('FromRGB_lod%d' % (resolution_log2 - (res - 1)), relu, next(param_iter)[np.newaxis, np.newaxis]) + vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1]) + vars += layer('4x4/Dense0', relu, flatten2(next(param_iter)[:,:,::-1,::-1]).transpose()) + vars += layer('4x4/Dense1', linear, next(param_iter)) + + vars += [('lod', state['toplevel_params']['cur_lod'])] + + return { + 'version': 2, + 'name': func, + 'build_module_src': inspect.getsource(networks), + 'build_func_name': theano_gan_remap[func], + 'static_kwargs': spec, + 'variables': vars} + +tfutil.network_import_handlers.append(patch_theano_gan) + +#---------------------------------------------------------------------------- +# Import handler for tfutil.Network that ignores unsupported/deprecated +# networks produced by older versions of the code. 
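+# Rather than aborting the import, any unrecognized network is replaced by a trivial
+# pass-through 'Dummy' network with no variables.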
+ +def ignore_unknown_theano_network(state): + if 'version' in state: + return state + + print('Ignoring unknown Theano network:', state['build_func_spec']['func']) + return { + 'version': 2, + 'name': 'Dummy', + 'build_module_src': 'def dummy(input, **kwargs): input.set_shape([None, 1]); return input', + 'build_func_name': 'dummy', + 'static_kwargs': {}, + 'variables': []} + +tfutil.network_import_handlers.append(ignore_unknown_theano_network) + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/loss.py b/models/pggan_tf_official/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..b485d50954c01e99dd5568fe4b91aaca5599902a --- /dev/null +++ b/models/pggan_tf_official/loss.py @@ -0,0 +1,82 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import numpy as np +import tensorflow as tf + +import tfutil + +#---------------------------------------------------------------------------- +# Convenience func that casts all of its arguments to tf.float32. + +def fp32(*values): + if len(values) == 1 and isinstance(values[0], tuple): + values = values[0] + values = tuple(tf.cast(v, tf.float32) for v in values) + return values if len(values) >= 2 else values[0] + +#---------------------------------------------------------------------------- +# Generator loss function used in the paper (WGAN + AC-GAN). + +def G_wgan_acgan(G, D, opt, training_set, minibatch_size, + cond_weight = 1.0): # Weight of the conditioning term. + + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + labels = training_set.get_random_labels_tf(minibatch_size) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True)) + loss = -fake_scores_out + + if D.output_shapes[1][1] > 0: + with tf.name_scope('LabelPenalty'): + label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out) + loss += label_penalty_fakes * cond_weight + return loss + +#---------------------------------------------------------------------------- +# Discriminator loss function used in the paper (WGAN-GP + AC-GAN). + +def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels, + wgan_lambda = 10.0, # Weight for the gradient penalty term. + wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}. + wgan_target = 1.0, # Target value for gradient magnitudes. + cond_weight = 1.0): # Weight of the conditioning terms. 
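+    # Term by term, the critic loss assembled below is:
+    #   loss = D(fake) - D(real)                                                       (WGAN critic loss)
+    #        + (wgan_lambda / wgan_target**2) * (||grad D(x_hat)|| - wgan_target)**2   (gradient penalty)
+    #        + wgan_epsilon * D(real)**2                                               (epsilon drift penalty)
+    #        + cond_weight * (label CE on reals + label CE on fakes)                   (AC-GAN term, if labels exist)
+    # where x_hat is a per-sample random interpolation between a real and a fake image.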
+ + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + real_scores_out, real_labels_out = fp32(D.get_output_for(reals, is_training=True)) + fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True)) + real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out) + fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out) + loss = fake_scores_out - real_scores_out + + with tf.name_scope('GradientPenalty'): + mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype) + mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors) + mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, is_training=True)) + mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out) + mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out)) + mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0])) + mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3])) + mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms) + gradient_penalty = tf.square(mixed_norms - wgan_target) + loss += gradient_penalty * (wgan_lambda / (wgan_target**2)) + + with tf.name_scope('EpsilonPenalty'): + epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out)) + loss += epsilon_penalty * wgan_epsilon + + if D.output_shapes[1][1] > 0: + with tf.name_scope('LabelPenalty'): + label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out) + label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out) + label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals) + label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes) + loss += (label_penalty_reals + label_penalty_fakes) * cond_weight + return loss + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/metrics/__init__.py b/models/pggan_tf_official/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1bb8bf6d7fd4c8d09aea89b47de20fb8bbb61626 --- /dev/null +++ b/models/pggan_tf_official/metrics/__init__.py @@ -0,0 +1 @@ +# empty diff --git a/models/pggan_tf_official/metrics/frechet_inception_distance.py b/models/pggan_tf_official/metrics/frechet_inception_distance.py new file mode 100644 index 0000000000000000000000000000000000000000..565bd36e8f587a5ceec441710f6fdae2ce14fe99 --- /dev/null +++ b/models/pggan_tf_official/metrics/frechet_inception_distance.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python3 +# +# Copyright 2017 Martin Heusel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Adapted from the original implementation by Martin Heusel. 
+# Source https://github.com/bioinf-jku/TTUR/blob/master/fid.py + +''' Calculates the Frechet Inception Distance (FID) to evalulate GANs. + +The FID metric calculates the distance between two distributions of images. +Typically, we have summary statistics (mean & covariance matrix) of one +of these distributions, while the 2nd distribution is given by a GAN. + +When run as a stand-alone program, it compares the distribution of +images that are stored as PNG/JPEG at a specified location with a +distribution given by summary statistics (in pickle format). + +The FID is calculated by assuming that X_1 and X_2 are the activations of +the pool_3 layer of the inception net for generated samples and real world +samples respectivly. + +See --help to see further details. +''' + +from __future__ import absolute_import, division, print_function +import numpy as np +import scipy as sp +import os +import gzip, pickle +import tensorflow as tf +from scipy.misc import imread +import pathlib +import urllib + + +class InvalidFIDException(Exception): + pass + + +def create_inception_graph(pth): + """Creates a graph from saved GraphDef file.""" + # Creates graph from saved graph_def.pb. + with tf.gfile.FastGFile( pth, 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString( f.read()) + _ = tf.import_graph_def( graph_def, name='FID_Inception_Net') +#------------------------------------------------------------------------------- + + +# code for handling inception net derived from +# https://github.com/openai/improved-gan/blob/master/inception_score/model.py +def _get_inception_layer(sess): + """Prepares inception net for batched usage and returns pool_3 layer. """ + layername = 'FID_Inception_Net/pool_3:0' + pool3 = sess.graph.get_tensor_by_name(layername) + ops = pool3.graph.get_operations() + for op_idx, op in enumerate(ops): + for o in op.outputs: + shape = o.get_shape() + if shape._dims is not None: + shape = [s.value for s in shape] + new_shape = [] + for j, s in enumerate(shape): + if s == 1 and j == 0: + new_shape.append(None) + else: + new_shape.append(s) + try: + o._shape = tf.TensorShape(new_shape) + except ValueError: + o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0 + return pool3 +#------------------------------------------------------------------------------- + + +def get_activations(images, sess, batch_size=50, verbose=False): + """Calculates the activations of the pool_3 layer for all images. + + Params: + -- images : Numpy array of dimension (n_images, hi, wi, 3). The values + must lie between 0 and 256. + -- sess : current session + -- batch_size : the images numpy array is split into batches with batch size + batch_size. A reasonable batch size depends on the disposable hardware. + -- verbose : If set to True and parameter out_step is given, the number of calculated + batches is reported. + Returns: + -- A numpy array of dimension (num images, 2048) that contains the + activations of the given tensor when feeding inception with the query tensor. + """ + inception_layer = _get_inception_layer(sess) + d0 = images.shape[0] + if batch_size > d0: + print("warning: batch size is bigger than the data size. 
setting batch size to data size") + batch_size = d0 + n_batches = d0//batch_size + n_used_imgs = n_batches*batch_size + pred_arr = np.empty((n_used_imgs,2048)) + for i in range(n_batches): + if verbose: + print("\rPropagating batch %d/%d" % (i+1, n_batches), end="", flush=True) + start = i*batch_size + end = start + batch_size + batch = images[start:end] + pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch}) + pred_arr[start:end] = pred.reshape(batch_size,-1) + if verbose: + print(" done") + return pred_arr +#------------------------------------------------------------------------------- + + +def calculate_frechet_distance(mu1, sigma1, mu2, sigma2): + """Numpy implementation of the Frechet Distance. + The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) + and X_2 ~ N(mu_2, C_2) is + d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). + + Params: + -- mu1 : Numpy array containing the activations of the pool_3 layer of the + inception net ( like returned by the function 'get_predictions') + -- mu2 : The sample mean over activations of the pool_3 layer, precalcualted + on an representive data set. + -- sigma2: The covariance matrix over activations of the pool_3 layer, + precalcualted on an representive data set. + + Returns: + -- dist : The Frechet Distance. + + Raises: + -- InvalidFIDException if nan occures. + """ + m = np.square(mu1 - mu2).sum() + #s = sp.linalg.sqrtm(np.dot(sigma1, sigma2)) # EDIT: commented out + s, _ = sp.linalg.sqrtm(np.dot(sigma1, sigma2), disp=False) # EDIT: added + dist = m + np.trace(sigma1+sigma2 - 2*s) + #if np.isnan(dist): # EDIT: commented out + # raise InvalidFIDException("nan occured in distance calculation.") # EDIT: commented out + #return dist # EDIT: commented out + return np.real(dist) # EDIT: added +#------------------------------------------------------------------------------- + + +def calculate_activation_statistics(images, sess, batch_size=50, verbose=False): + """Calculation of the statistics used by the FID. + Params: + -- images : Numpy array of dimension (n_images, hi, wi, 3). The values + must lie between 0 and 255. + -- sess : current session + -- batch_size : the images numpy array is split into batches with batch size + batch_size. A reasonable batch size depends on the available hardware. + -- verbose : If set to True and parameter out_step is given, the number of calculated + batches is reported. + Returns: + -- mu : The mean over samples of the activations of the pool_3 layer of + the incption model. + -- sigma : The covariance matrix of the activations of the pool_3 layer of + the incption model. + """ + act = get_activations(images, sess, batch_size, verbose) + mu = np.mean(act, axis=0) + sigma = np.cov(act, rowvar=False) + return mu, sigma +#------------------------------------------------------------------------------- + + +#------------------------------------------------------------------------------- +# The following functions aren't needed for calculating the FID +# they're just here to make this module work as a stand-alone script +# for calculating FID scores +#------------------------------------------------------------------------------- +def check_or_download_inception(inception_path): + ''' Checks if the path to the inception file is valid, or downloads + the file if it is not present. 
''' + INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' + if inception_path is None: + inception_path = '/tmp' + inception_path = pathlib.Path(inception_path) + model_file = inception_path / 'classify_image_graph_def.pb' + if not model_file.exists(): + print("Downloading Inception model") + from urllib import request + import tarfile + fn, _ = request.urlretrieve(INCEPTION_URL) + with tarfile.open(fn, mode='r') as f: + f.extract('classify_image_graph_def.pb', str(model_file.parent)) + return str(model_file) + + +def _handle_path(path, sess): + if path.endswith('.npz'): + f = np.load(path) + m, s = f['mu'][:], f['sigma'][:] + f.close() + else: + path = pathlib.Path(path) + files = list(path.glob('*.jpg')) + list(path.glob('*.png')) + x = np.array([imread(str(fn)).astype(np.float32) for fn in files]) + m, s = calculate_activation_statistics(x, sess) + return m, s + + +def calculate_fid_given_paths(paths, inception_path): + ''' Calculates the FID of two paths. ''' + inception_path = check_or_download_inception(inception_path) + + for p in paths: + if not os.path.exists(p): + raise RuntimeError("Invalid path: %s" % p) + + create_inception_graph(str(inception_path)) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + m1, s1 = _handle_path(paths[0], sess) + m2, s2 = _handle_path(paths[1], sess) + fid_value = calculate_frechet_distance(m1, s1, m2, s2) + return fid_value + + +if __name__ == "__main__": + from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter + parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) + parser.add_argument("path", type=str, nargs=2, + help='Path to the generated images or to .npz statistic files') + parser.add_argument("-i", "--inception", type=str, default=None, + help='Path to Inception model (will be downloaded if not provided)') + parser.add_argument("--gpu", default="", type=str, + help='GPU to use (leave blank for CPU only)') + args = parser.parse_args() + os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu + fid_value = calculate_fid_given_paths(args.path, args.inception) + print("FID: ", fid_value) + +#---------------------------------------------------------------------------- +# EDIT: added + +class API: + def __init__(self, num_images, image_shape, image_dtype, minibatch_size): + import config + self.network_dir = os.path.join(config.result_dir, '_inception_fid') + self.network_file = check_or_download_inception(self.network_dir) + self.sess = tf.get_default_session() + create_inception_graph(self.network_file) + + def get_metric_names(self): + return ['FID'] + + def get_metric_formatting(self): + return ['%-10.4f'] + + def begin(self, mode): + assert mode in ['warmup', 'reals', 'fakes'] + self.activations = [] + + def feed(self, mode, minibatch): + act = get_activations(minibatch.transpose(0,2,3,1), self.sess, batch_size=minibatch.shape[0]) + self.activations.append(act) + + def end(self, mode): + act = np.concatenate(self.activations) + mu = np.mean(act, axis=0) + sigma = np.cov(act, rowvar=False) + if mode in ['warmup', 'reals']: + self.mu_real = mu + self.sigma_real = sigma + fid = calculate_frechet_distance(mu, sigma, self.mu_real, self.sigma_real) + return [fid] + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/metrics/inception_score.py b/models/pggan_tf_official/metrics/inception_score.py new file mode 100644 index 
0000000000000000000000000000000000000000..c7ed7483072b7844a3779c965fa058ef75d06f5a --- /dev/null +++ b/models/pggan_tf_official/metrics/inception_score.py @@ -0,0 +1,147 @@ +# Copyright 2016 Wojciech Zaremba +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Adapted from the original implementation by Wojciech Zaremba. +# Source: https://github.com/openai/improved-gan/blob/master/inception_score/model.py + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os.path +import sys +import tarfile + +import numpy as np +from six.moves import urllib +import tensorflow as tf +import glob +import scipy.misc +import math +import sys + +MODEL_DIR = '/tmp/imagenet' + +DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' +softmax = None + +# Call this function with list of images. Each of elements should be a +# numpy array with values ranging from 0 to 255. +def get_inception_score(images, splits=10): + assert(type(images) == list) + assert(type(images[0]) == np.ndarray) + assert(len(images[0].shape) == 3) + #assert(np.max(images[0]) > 10) # EDIT: commented out + #assert(np.min(images[0]) >= 0.0) + inps = [] + for img in images: + img = img.astype(np.float32) + inps.append(np.expand_dims(img, 0)) + bs = 100 + with tf.Session() as sess: + preds = [] + n_batches = int(math.ceil(float(len(inps)) / float(bs))) + for i in range(n_batches): + #sys.stdout.write(".") # EDIT: commented out + #sys.stdout.flush() + inp = inps[(i * bs):min((i + 1) * bs, len(inps))] + inp = np.concatenate(inp, 0) + pred = sess.run(softmax, {'ExpandDims:0': inp}) + preds.append(pred) + preds = np.concatenate(preds, 0) + scores = [] + for i in range(splits): + part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :] + kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0))) + kl = np.mean(np.sum(kl, 1)) + scores.append(np.exp(kl)) + return np.mean(scores), np.std(scores) + +# This function is called automatically. +def _init_inception(): + global softmax + if not os.path.exists(MODEL_DIR): + os.makedirs(MODEL_DIR) + filename = DATA_URL.split('/')[-1] + filepath = os.path.join(MODEL_DIR, filename) + if not os.path.exists(filepath): + def _progress(count, block_size, total_size): + sys.stdout.write('\r>> Downloading %s %.1f%%' % ( + filename, float(count * block_size) / float(total_size) * 100.0)) + sys.stdout.flush() + filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) + print() + statinfo = os.stat(filepath) + print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.') + tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR) # EDIT: increased indent + with tf.gfile.FastGFile(os.path.join( + MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + _ = tf.import_graph_def(graph_def, name='') + # Works with an arbitrary minibatch size. 
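+  # The loop below patches the static shapes recorded on the pool_3 subgraph, replacing
+  # the hard-coded batch dimension of 1 with None so the imported Inception graph
+  # accepts minibatches of any size.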
+ with tf.Session() as sess: + pool3 = sess.graph.get_tensor_by_name('pool_3:0') + ops = pool3.graph.get_operations() + for op_idx, op in enumerate(ops): + for o in op.outputs: + shape = o.get_shape() + shape = [s.value for s in shape] + new_shape = [] + for j, s in enumerate(shape): + if s == 1 and j == 0: + new_shape.append(None) + else: + new_shape.append(s) + try: + o._shape = tf.TensorShape(new_shape) + except ValueError: + o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0 + w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1] + logits = tf.matmul(tf.squeeze(pool3), w) + softmax = tf.nn.softmax(logits) + +#if softmax is None: # EDIT: commented out +# _init_inception() # EDIT: commented out + +#---------------------------------------------------------------------------- +# EDIT: added + +class API: + def __init__(self, num_images, image_shape, image_dtype, minibatch_size): + import config + globals()['MODEL_DIR'] = os.path.join(config.result_dir, '_inception') + self.sess = tf.get_default_session() + _init_inception() + + def get_metric_names(self): + return ['IS_mean', 'IS_std'] + + def get_metric_formatting(self): + return ['%-10.4f', '%-10.4f'] + + def begin(self, mode): + assert mode in ['warmup', 'reals', 'fakes'] + self.images = [] + + def feed(self, mode, minibatch): + self.images.append(minibatch.transpose(0, 2, 3, 1)) + + def end(self, mode): + images = list(np.concatenate(self.images)) + with self.sess.as_default(): + mean, std = get_inception_score(images) + return [mean, std] + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/metrics/ms_ssim.py b/models/pggan_tf_official/metrics/ms_ssim.py new file mode 100644 index 0000000000000000000000000000000000000000..1135f2a7788d4c6c68e22aeb2cdeaaeed780df75 --- /dev/null +++ b/models/pggan_tf_official/metrics/ms_ssim.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Adapted from the original implementation by The TensorFlow Authors. +# Source: https://github.com/tensorflow/models/blob/master/research/compression/image_encoder/msssim.py + +import numpy as np +from scipy import signal +from scipy.ndimage.filters import convolve + +def _FSpecialGauss(size, sigma): + """Function to mimic the 'fspecial' gaussian MATLAB function.""" + radius = size // 2 + offset = 0.0 + start, stop = -radius, radius + 1 + if size % 2 == 0: + offset = 0.5 + stop -= 1 + x, y = np.mgrid[offset + start:stop, offset + start:stop] + assert len(x) == size + g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2))) + return g / g.sum() + +def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): + """Return the Structural Similarity Map between `img1` and `img2`. 
+ + This function attempts to match the functionality of ssim_index_new.m by + Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip + + Arguments: + img1: Numpy array holding the first RGB image batch. + img2: Numpy array holding the second RGB image batch. + max_val: the dynamic range of the images (i.e., the difference between the + maximum the and minimum allowed values). + filter_size: Size of blur kernel to use (will be reduced for small images). + filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced + for small images). + k1: Constant used to maintain stability in the SSIM calculation (0.01 in + the original paper). + k2: Constant used to maintain stability in the SSIM calculation (0.03 in + the original paper). + + Returns: + Pair containing the mean SSIM and contrast sensitivity between `img1` and + `img2`. + + Raises: + RuntimeError: If input images don't have the same shape or don't have four + dimensions: [batch_size, height, width, depth]. + """ + if img1.shape != img2.shape: + raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape)) + if img1.ndim != 4: + raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim) + + img1 = img1.astype(np.float32) + img2 = img2.astype(np.float32) + _, height, width, _ = img1.shape + + # Filter size can't be larger than height or width of images. + size = min(filter_size, height, width) + + # Scale down sigma if a smaller filter size is used. + sigma = size * filter_sigma / filter_size if filter_size else 0 + + if filter_size: + window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1)) + mu1 = signal.fftconvolve(img1, window, mode='valid') + mu2 = signal.fftconvolve(img2, window, mode='valid') + sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid') + sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid') + sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid') + else: + # Empty blur kernel so no need to convolve. + mu1, mu2 = img1, img2 + sigma11 = img1 * img1 + sigma22 = img2 * img2 + sigma12 = img1 * img2 + + mu11 = mu1 * mu1 + mu22 = mu2 * mu2 + mu12 = mu1 * mu2 + sigma11 -= mu11 + sigma22 -= mu22 + sigma12 -= mu12 + + # Calculate intermediate values used by both ssim and cs_map. + c1 = (k1 * max_val) ** 2 + c2 = (k2 * max_val) ** 2 + v1 = 2.0 * sigma12 + c2 + v2 = sigma11 + sigma22 + c2 + ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)), axis=(1, 2, 3)) # Return for each image individually. + cs = np.mean(v1 / v2, axis=(1, 2, 3)) + return ssim, cs + +def _HoxDownsample(img): + return (img[:, 0::2, 0::2, :] + img[:, 1::2, 0::2, :] + img[:, 0::2, 1::2, :] + img[:, 1::2, 1::2, :]) * 0.25 + +def msssim(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03, weights=None): + """Return the MS-SSIM score between `img1` and `img2`. + + This function implements Multi-Scale Structural Similarity (MS-SSIM) Image + Quality Assessment according to Zhou Wang's paper, "Multi-scale structural + similarity for image quality assessment" (2003). + Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf + + Author's MATLAB implementation: + http://www.cns.nyu.edu/~lcv/ssim/msssim.zip + + Arguments: + img1: Numpy array holding the first RGB image batch. + img2: Numpy array holding the second RGB image batch. + max_val: the dynamic range of the images (i.e., the difference between the + maximum the and minimum allowed values). 
+ filter_size: Size of blur kernel to use (will be reduced for small images). + filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced + for small images). + k1: Constant used to maintain stability in the SSIM calculation (0.01 in + the original paper). + k2: Constant used to maintain stability in the SSIM calculation (0.03 in + the original paper). + weights: List of weights for each level; if none, use five levels and the + weights from the original paper. + + Returns: + MS-SSIM score between `img1` and `img2`. + + Raises: + RuntimeError: If input images don't have the same shape or don't have four + dimensions: [batch_size, height, width, depth]. + """ + if img1.shape != img2.shape: + raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape)) + if img1.ndim != 4: + raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim) + + # Note: default weights don't sum to 1.0 but do match the paper / matlab code. + weights = np.array(weights if weights else [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]) + levels = weights.size + downsample_filter = np.ones((1, 2, 2, 1)) / 4.0 + im1, im2 = [x.astype(np.float32) for x in [img1, img2]] + mssim = [] + mcs = [] + for _ in range(levels): + ssim, cs = _SSIMForMultiScale( + im1, im2, max_val=max_val, filter_size=filter_size, + filter_sigma=filter_sigma, k1=k1, k2=k2) + mssim.append(ssim) + mcs.append(cs) + im1, im2 = [_HoxDownsample(x) for x in [im1, im2]] + + # Clip to zero. Otherwise we get NaNs. + mssim = np.clip(np.asarray(mssim), 0.0, np.inf) + mcs = np.clip(np.asarray(mcs), 0.0, np.inf) + + # Average over images only at the end. + return np.mean(np.prod(mcs[:-1, :] ** weights[:-1, np.newaxis], axis=0) * (mssim[-1, :] ** weights[-1])) + +#---------------------------------------------------------------------------- +# EDIT: added + +class API: + def __init__(self, num_images, image_shape, image_dtype, minibatch_size): + assert num_images % 2 == 0 and minibatch_size % 2 == 0 + self.num_pairs = num_images // 2 + + def get_metric_names(self): + return ['MS-SSIM'] + + def get_metric_formatting(self): + return ['%-10.4f'] + + def begin(self, mode): + assert mode in ['warmup', 'reals', 'fakes'] + self.sum = 0.0 + + def feed(self, mode, minibatch): + images = minibatch.transpose(0, 2, 3, 1) + score = msssim(images[0::2], images[1::2]) + self.sum += score * (images.shape[0] // 2) + + def end(self, mode): + avg = self.sum / self.num_pairs + return [avg] + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/metrics/sliced_wasserstein.py b/models/pggan_tf_official/metrics/sliced_wasserstein.py new file mode 100644 index 0000000000000000000000000000000000000000..0028897c3aeffe7eb8f63eb4b1f37c2329dc84cf --- /dev/null +++ b/models/pggan_tf_official/metrics/sliced_wasserstein.py @@ -0,0 +1,135 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
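+# Sliced Wasserstein distance (SWD) between two sets of local image descriptors:
+# random patches are extracted from each minibatch, normalized, projected onto random
+# unit directions, and the sorted 1-D projections are compared by mean absolute
+# difference, averaged over directions and repeats. The Laplacian-pyramid helpers below
+# provide the multi-scale decomposition the metric is typically evaluated on.
+#
+# Illustrative use of the helpers in this file (parameter values are only an example):
+#   # batch_a, batch_b: float32 arrays of shape (N, 3, H, W) holding real / fake images
+#   descs_a = finalize_descriptors([get_descriptors_for_minibatch(batch_a, 7, 128)])
+#   descs_b = finalize_descriptors([get_descriptors_for_minibatch(batch_b, 7, 128)])
+#   swd = sliced_wasserstein(descs_a, descs_b, dir_repeats=4, dirs_per_repeat=128)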
+ +import numpy as np +import scipy.ndimage + +#---------------------------------------------------------------------------- + +def get_descriptors_for_minibatch(minibatch, nhood_size, nhoods_per_image): + S = minibatch.shape # (minibatch, channel, height, width) + assert len(S) == 4 and S[1] == 3 + N = nhoods_per_image * S[0] + H = nhood_size // 2 + nhood, chan, x, y = np.ogrid[0:N, 0:3, -H:H+1, -H:H+1] + img = nhood // nhoods_per_image + x = x + np.random.randint(H, S[3] - H, size=(N, 1, 1, 1)) + y = y + np.random.randint(H, S[2] - H, size=(N, 1, 1, 1)) + idx = ((img * S[1] + chan) * S[2] + y) * S[3] + x + return minibatch.flat[idx] + +#---------------------------------------------------------------------------- + +def finalize_descriptors(desc): + if isinstance(desc, list): + desc = np.concatenate(desc, axis=0) + assert desc.ndim == 4 # (neighborhood, channel, height, width) + desc -= np.mean(desc, axis=(0, 2, 3), keepdims=True) + desc /= np.std(desc, axis=(0, 2, 3), keepdims=True) + desc = desc.reshape(desc.shape[0], -1) + return desc + +#---------------------------------------------------------------------------- + +def sliced_wasserstein(A, B, dir_repeats, dirs_per_repeat): + assert A.ndim == 2 and A.shape == B.shape # (neighborhood, descriptor_component) + results = [] + for repeat in range(dir_repeats): + dirs = np.random.randn(A.shape[1], dirs_per_repeat) # (descriptor_component, direction) + dirs /= np.sqrt(np.sum(np.square(dirs), axis=0, keepdims=True)) # normalize descriptor components for each direction + dirs = dirs.astype(np.float32) + projA = np.matmul(A, dirs) # (neighborhood, direction) + projB = np.matmul(B, dirs) + projA = np.sort(projA, axis=0) # sort neighborhood projections for each direction + projB = np.sort(projB, axis=0) + dists = np.abs(projA - projB) # pointwise wasserstein distances + results.append(np.mean(dists)) # average over neighborhoods and directions + return np.mean(results) # average over repeats + +#---------------------------------------------------------------------------- + +def downscale_minibatch(minibatch, lod): + if lod == 0: + return minibatch + t = minibatch.astype(np.float32) + for i in range(lod): + t = (t[:, :, 0::2, 0::2] + t[:, :, 0::2, 1::2] + t[:, :, 1::2, 0::2] + t[:, :, 1::2, 1::2]) * 0.25 + return np.round(t).clip(0, 255).astype(np.uint8) + +#---------------------------------------------------------------------------- + +gaussian_filter = np.float32([ + [1, 4, 6, 4, 1], + [4, 16, 24, 16, 4], + [6, 24, 36, 24, 6], + [4, 16, 24, 16, 4], + [1, 4, 6, 4, 1]]) / 256.0 + +def pyr_down(minibatch): # matches cv2.pyrDown() + assert minibatch.ndim == 4 + return scipy.ndimage.convolve(minibatch, gaussian_filter[np.newaxis, np.newaxis, :, :], mode='mirror')[:, :, ::2, ::2] + +def pyr_up(minibatch): # matches cv2.pyrUp() + assert minibatch.ndim == 4 + S = minibatch.shape + res = np.zeros((S[0], S[1], S[2] * 2, S[3] * 2), minibatch.dtype) + res[:, :, ::2, ::2] = minibatch + return scipy.ndimage.convolve(res, gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0, mode='mirror') + +def generate_laplacian_pyramid(minibatch, num_levels): + pyramid = [np.float32(minibatch)] + for i in range(1, num_levels): + pyramid.append(pyr_down(pyramid[-1])) + pyramid[-2] -= pyr_up(pyramid[-1]) + return pyramid + +def reconstruct_laplacian_pyramid(pyramid): + minibatch = pyramid[-1] + for level in pyramid[-2::-1]: + minibatch = pyr_up(minibatch) + level + return minibatch + +#---------------------------------------------------------------------------- + +class API: 
+ def __init__(self, num_images, image_shape, image_dtype, minibatch_size): + self.nhood_size = 7 + self.nhoods_per_image = 128 + self.dir_repeats = 4 + self.dirs_per_repeat = 128 + self.resolutions = [] + res = image_shape[1] + while res >= 16: + self.resolutions.append(res) + res //= 2 + + def get_metric_names(self): + return ['SWDx1e3_%d' % res for res in self.resolutions] + ['SWDx1e3_avg'] + + def get_metric_formatting(self): + return ['%-13.4f'] * len(self.get_metric_names()) + + def begin(self, mode): + assert mode in ['warmup', 'reals', 'fakes'] + self.descriptors = [[] for res in self.resolutions] + + def feed(self, mode, minibatch): + for lod, level in enumerate(generate_laplacian_pyramid(minibatch, len(self.resolutions))): + desc = get_descriptors_for_minibatch(level, self.nhood_size, self.nhoods_per_image) + self.descriptors[lod].append(desc) + + def end(self, mode): + desc = [finalize_descriptors(d) for d in self.descriptors] + del self.descriptors + if mode in ['warmup', 'reals']: + self.desc_real = desc + dist = [sliced_wasserstein(dreal, dfake, self.dir_repeats, self.dirs_per_repeat) for dreal, dfake in zip(self.desc_real, desc)] + del desc + dist = [d * 1e3 for d in dist] # multiply by 10^3 + return dist + [np.mean(dist)] + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/misc.py b/models/pggan_tf_official/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..d0130e3fcb80d8910eda7c0763a9c5897daed7e6 --- /dev/null +++ b/models/pggan_tf_official/misc.py @@ -0,0 +1,344 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import os +import sys +import glob +import datetime +import pickle +import re +import numpy as np +from collections import OrderedDict +import scipy.ndimage +import PIL.Image + +import config +import dataset +import legacy + +#---------------------------------------------------------------------------- +# Convenience wrappers for pickle that are able to load data produced by +# older versions of the code. + +def load_pkl(filename): + with open(filename, 'rb') as file: + return legacy.LegacyUnpickler(file, encoding='latin1').load() + +def save_pkl(obj, filename): + with open(filename, 'wb') as file: + pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL) + +#---------------------------------------------------------------------------- +# Image utils. 
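The sliced Wasserstein code above (sliced_wasserstein.py) reduces each batch to normalized 7x7 neighborhood descriptors and then compares sorted 1-D projections. Below is a minimal sketch on synthetic float32 NCHW data; in real use the API class feeds Laplacian-pyramid levels rather than raw batches, and the import path is an assumption.

import numpy as np
from metrics.sliced_wasserstein import (  # import path assumed
    get_descriptors_for_minibatch, finalize_descriptors, sliced_wasserstein)

rng = np.random.RandomState(0)
real = rng.randint(0, 256, size=(8, 3, 32, 32)).astype(np.float32)  # NCHW, 3 channels required
fake = rng.randint(0, 256, size=(8, 3, 32, 32)).astype(np.float32)

# 128 random 7x7 neighborhoods per image, normalized per channel and flattened.
desc_real = finalize_descriptors(get_descriptors_for_minibatch(real, nhood_size=7, nhoods_per_image=128))
desc_fake = finalize_descriptors(get_descriptors_for_minibatch(fake, nhood_size=7, nhoods_per_image=128))

# Mean 1-D Wasserstein distance over 4 repeats x 128 random projection directions.
swd = sliced_wasserstein(desc_real, desc_fake, dir_repeats=4, dirs_per_repeat=128)
print(swd * 1e3)  # reported as SWDx1e3, matching the API class above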
+ +def adjust_dynamic_range(data, drange_in, drange_out): + if drange_in != drange_out: + scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0])) + bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale) + data = data * scale + bias + return data + +def create_image_grid(images, grid_size=None): + assert images.ndim == 3 or images.ndim == 4 + num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2] + + if grid_size is not None: + grid_w, grid_h = tuple(grid_size) + else: + grid_w = max(int(np.ceil(np.sqrt(num))), 1) + grid_h = max((num - 1) // grid_w + 1, 1) + + grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype) + for idx in range(num): + x = (idx % grid_w) * img_w + y = (idx // grid_w) * img_h + grid[..., y : y + img_h, x : x + img_w] = images[idx] + return grid + +def convert_to_pil_image(image, drange=[0,1]): + assert image.ndim == 2 or image.ndim == 3 + if image.ndim == 3: + if image.shape[0] == 1: + image = image[0] # grayscale CHW => HW + else: + image = image.transpose(1, 2, 0) # CHW -> HWC + + image = adjust_dynamic_range(image, drange, [0,255]) + image = np.rint(image).clip(0, 255).astype(np.uint8) + format = 'RGB' if image.ndim == 3 else 'L' + return PIL.Image.fromarray(image, format) + +def save_image(image, filename, drange=[0,1], quality=95): + img = convert_to_pil_image(image, drange) + if '.jpg' in filename: + img.save(filename,"JPEG", quality=quality, optimize=True) + else: + img.save(filename) + +def save_image_grid(images, filename, drange=[0,1], grid_size=None): + convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename) + +#---------------------------------------------------------------------------- +# Logging of stdout and stderr to a file. + +class OutputLogger(object): + def __init__(self): + self.file = None + self.buffer = '' + + def set_log_file(self, filename, mode='wt'): + assert self.file is None + self.file = open(filename, mode) + if self.buffer is not None: + self.file.write(self.buffer) + self.buffer = None + + def write(self, data): + if self.file is not None: + self.file.write(data) + if self.buffer is not None: + self.buffer += data + + def flush(self): + if self.file is not None: + self.file.flush() + +class TeeOutputStream(object): + def __init__(self, child_streams, autoflush=False): + self.child_streams = child_streams + self.autoflush = autoflush + + def write(self, data): + for stream in self.child_streams: + stream.write(data) + if self.autoflush: + self.flush() + + def flush(self): + for stream in self.child_streams: + stream.flush() + +output_logger = None + +def init_output_logging(): + global output_logger + if output_logger is None: + output_logger = OutputLogger() + sys.stdout = TeeOutputStream([sys.stdout, output_logger], autoflush=True) + sys.stderr = TeeOutputStream([sys.stderr, output_logger], autoflush=True) + +def set_output_log_file(filename, mode='wt'): + if output_logger is not None: + output_logger.set_log_file(filename, mode) + +#---------------------------------------------------------------------------- +# Reporting results. + +def create_result_subdir(result_dir, desc): + + # Select run ID and create subdir. 
+ while True: + run_id = 0 + for fname in glob.glob(os.path.join(result_dir, '*')): + try: + fbase = os.path.basename(fname) + ford = int(fbase[:fbase.find('-')]) + run_id = max(run_id, ford + 1) + except ValueError: + pass + + result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, desc)) + try: + os.makedirs(result_subdir) + break + except OSError: + if os.path.isdir(result_subdir): + continue + raise + + print("Saving results to", result_subdir) + set_output_log_file(os.path.join(result_subdir, 'log.txt')) + + # Export config. + try: + with open(os.path.join(result_subdir, 'config.txt'), 'wt') as fout: + for k, v in sorted(config.__dict__.items()): + if not k.startswith('_'): + fout.write("%s = %s\n" % (k, str(v))) + except: + pass + + return result_subdir + +def format_time(seconds): + s = int(np.rint(seconds)) + if s < 60: return '%ds' % (s) + elif s < 60*60: return '%dm %02ds' % (s // 60, s % 60) + elif s < 24*60*60: return '%dh %02dm %02ds' % (s // (60*60), (s // 60) % 60, s % 60) + else: return '%dd %02dh %02dm' % (s // (24*60*60), (s // (60*60)) % 24, (s // 60) % 60) + +#---------------------------------------------------------------------------- +# Locating results. + +def locate_result_subdir(run_id_or_result_subdir): + if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir): + return run_id_or_result_subdir + + searchdirs = [] + searchdirs += [''] + searchdirs += ['results'] + searchdirs += ['networks'] + + for searchdir in searchdirs: + dir = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir) + dir = os.path.join(dir, str(run_id_or_result_subdir)) + if os.path.isdir(dir): + return dir + prefix = '%03d' % run_id_or_result_subdir if isinstance(run_id_or_result_subdir, int) else str(run_id_or_result_subdir) + dirs = sorted(glob.glob(os.path.join(config.result_dir, searchdir, prefix + '-*'))) + dirs = [dir for dir in dirs if os.path.isdir(dir)] + if len(dirs) == 1: + return dirs[0] + raise IOError('Cannot locate result subdir for run', run_id_or_result_subdir) + +def list_network_pkls(run_id_or_result_subdir, include_final=True): + result_subdir = locate_result_subdir(run_id_or_result_subdir) + pkls = sorted(glob.glob(os.path.join(result_subdir, 'network-*.pkl'))) + if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl': + if include_final: + pkls.append(pkls[0]) + del pkls[0] + return pkls + +def locate_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot=None): + if isinstance(run_id_or_result_subdir_or_network_pkl, str) and os.path.isfile(run_id_or_result_subdir_or_network_pkl): + return run_id_or_result_subdir_or_network_pkl + + pkls = list_network_pkls(run_id_or_result_subdir_or_network_pkl) + if len(pkls) >= 1 and snapshot is None: + return pkls[-1] + for pkl in pkls: + try: + name = os.path.splitext(os.path.basename(pkl))[0] + number = int(name.split('-')[-1]) + if number == snapshot: + return pkl + except ValueError: pass + except IndexError: pass + raise IOError('Cannot locate network pkl for snapshot', snapshot) + +def get_id_string_for_network_pkl(network_pkl): + p = network_pkl.replace('.pkl', '').replace('\\', '/').split('/') + return '-'.join(p[max(len(p) - 2, 0):]) + +#---------------------------------------------------------------------------- +# Loading and using trained networks. 
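A couple of concrete values make the conventions of the bookkeeping helpers above easier to read. The sketch assumes it is run from inside models/pggan_tf_official/ so that misc.py's sibling imports (config, dataset, legacy) resolve; the 'pgan' description string is only an example.

import misc

print(misc.format_time(42))       # '42s'
print(misc.format_time(3723))     # '1h 02m 03s'
print(misc.format_time(200000))   # '2d 07h 33m'

# create_result_subdir('results', 'pgan') would pick the next free run id and create e.g.
# results/000-pgan, then redirect stdout/stderr logging to results/000-pgan/log.txt.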
+ +def load_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot=None): + return load_pkl(locate_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot)) + +def random_latents(num_latents, G, random_state=None): + if random_state is not None: + return random_state.randn(num_latents, *G.input_shape[1:]).astype(np.float32) + else: + return np.random.randn(num_latents, *G.input_shape[1:]).astype(np.float32) + +def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment + result_subdir = locate_result_subdir(run_id) + + # Parse config.txt. + parsed_cfg = dict() + with open(os.path.join(result_subdir, 'config.txt'), 'rt') as f: + for line in f: + if line.startswith('dataset =') or line.startswith('train ='): + exec(line, parsed_cfg, parsed_cfg) + dataset_cfg = parsed_cfg.get('dataset', dict()) + train_cfg = parsed_cfg.get('train', dict()) + mirror_augment = train_cfg.get('mirror_augment', False) + + # Handle legacy options. + if 'h5_path' in dataset_cfg: + dataset_cfg['tfrecord_dir'] = dataset_cfg.pop('h5_path').replace('.h5', '') + if 'mirror_augment' in dataset_cfg: + mirror_augment = dataset_cfg.pop('mirror_augment') + if 'max_labels' in dataset_cfg: + v = dataset_cfg.pop('max_labels') + if v is None: v = 0 + if v == 'all': v = 'full' + dataset_cfg['max_label_size'] = v + if 'max_images' in dataset_cfg: + dataset_cfg.pop('max_images') + + # Handle legacy dataset names. + v = dataset_cfg['tfrecord_dir'] + v = v.replace('-32x32', '').replace('-32', '') + v = v.replace('-128x128', '').replace('-128', '') + v = v.replace('-256x256', '').replace('-256', '') + v = v.replace('-1024x1024', '').replace('-1024', '') + v = v.replace('celeba-hq', 'celebahq') + v = v.replace('cifar-10', 'cifar10') + v = v.replace('cifar-100', 'cifar100') + v = v.replace('mnist-rgb', 'mnistrgb') + v = re.sub('lsun-100k-([^-]*)', 'lsun-\\1-100k', v) + v = re.sub('lsun-full-([^-]*)', 'lsun-\\1-full', v) + dataset_cfg['tfrecord_dir'] = v + + # Load dataset. + dataset_cfg.update(kwargs) + dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **dataset_cfg) + return dataset_obj, mirror_augment + +def apply_mirror_augment(minibatch): + mask = np.random.rand(minibatch.shape[0]) < 0.5 + minibatch = np.array(minibatch) + minibatch[mask] = minibatch[mask, :, :, ::-1] + return minibatch + +#---------------------------------------------------------------------------- +# Text labels. + +_text_label_cache = OrderedDict() + +def draw_text_label(img, text, x, y, alignx=0.5, aligny=0.5, color=255, opacity=1.0, glow_opacity=1.0, **kwargs): + color = np.array(color).flatten().astype(np.float32) + assert img.ndim == 3 and img.shape[2] == color.size or color.size == 1 + alpha, glow = setup_text_label(text, **kwargs) + xx, yy = int(np.rint(x - alpha.shape[1] * alignx)), int(np.rint(y - alpha.shape[0] * aligny)) + xb, yb = max(-xx, 0), max(-yy, 0) + xe, ye = min(alpha.shape[1], img.shape[1] - xx), min(alpha.shape[0], img.shape[0] - yy) + img = np.array(img) + slice = img[yy+yb : yy+ye, xx+xb : xx+xe, :] + slice[:] = slice * (1.0 - (1.0 - (1.0 - alpha[yb:ye, xb:xe]) * (1.0 - glow[yb:ye, xb:xe] * glow_opacity)) * opacity)[:, :, np.newaxis] + slice[:] = slice + alpha[yb:ye, xb:xe, np.newaxis] * (color * opacity)[np.newaxis, np.newaxis, :] + return img + +def setup_text_label(text, font='Calibri', fontsize=32, padding=6, glow_size=2.0, glow_coef=3.0, glow_exp=2.0, cache_size=100): # => (alpha, glow) + # Lookup from cache. 
+ key = (text, font, fontsize, padding, glow_size, glow_coef, glow_exp) + if key in _text_label_cache: + value = _text_label_cache[key] + del _text_label_cache[key] # LRU policy + _text_label_cache[key] = value + return value + + # Limit cache size. + while len(_text_label_cache) >= cache_size: + _text_label_cache.popitem(last=False) + + # Render text. + import moviepy.editor # pip install moviepy + alpha = moviepy.editor.TextClip(text, font=font, fontsize=fontsize).mask.make_frame(0) + alpha = np.pad(alpha, padding, mode='constant', constant_values=0.0) + glow = scipy.ndimage.gaussian_filter(alpha, glow_size) + glow = 1.0 - np.maximum(1.0 - glow * glow_coef, 0.0) ** glow_exp + + # Add to cache. + value = (alpha, glow) + _text_label_cache[key] = value + return value + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/networks.py b/models/pggan_tf_official/networks.py new file mode 100644 index 0000000000000000000000000000000000000000..731683f7834bb3268b32e07326c9caec83c888af --- /dev/null +++ b/models/pggan_tf_official/networks.py @@ -0,0 +1,315 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import numpy as np +import tensorflow as tf + +# NOTE: Do not import any application-specific modules here! + +#---------------------------------------------------------------------------- + +def lerp(a, b, t): return a + (b - a) * t +def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) +def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda) + +#---------------------------------------------------------------------------- +# Get/create weight tensor for a convolutional or fully-connected layer. + +def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None): + if fan_in is None: fan_in = np.prod(shape[:-1]) + std = gain / np.sqrt(fan_in) # He init + if use_wscale: + wscale = tf.constant(np.float32(std), name='wscale') + return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale + else: + return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std)) + +#---------------------------------------------------------------------------- +# Fully-connected layer. + +def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False): + if len(x.shape) > 2: + x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) + w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) + w = tf.cast(w, x.dtype) + return tf.matmul(x, w) + +#---------------------------------------------------------------------------- +# Convolutional layer. + +def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): + assert kernel >= 1 and kernel % 2 == 1 + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) + w = tf.cast(w, x.dtype) + return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Apply bias to the given activation tensor. 
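get_weight() above implements the equalized-learning-rate trick: with use_wscale=True the variable is initialized at unit standard deviation and the He-init factor is applied as a per-layer runtime constant rather than being baked into the initializer. A NumPy-only sketch of the arithmetic (the TensorFlow variable plumbing is omitted):

import numpy as np

shape = [3, 3, 512, 512]          # [kernel, kernel, fmaps_in, fmaps_out]
gain = np.sqrt(2)                 # He init
fan_in = np.prod(shape[:-1])      # 3 * 3 * 512
std = gain / np.sqrt(fan_in)      # ~0.0208

rng = np.random.RandomState(0)

# use_wscale=False: std is baked into the initializer.
w_plain = rng.normal(0.0, std, size=shape).astype(np.float32)

# use_wscale=True: the trainable variable keeps unit scale; the same std is applied as a
# runtime constant, so a single learning rate acts evenly across layers of different fan-in.
w_scaled = rng.normal(0.0, 1.0, size=shape).astype(np.float32) * np.float32(std)

print(std, w_plain.std(), w_scaled.std())  # all close to 0.02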
+ +def apply_bias(x): + b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) + b = tf.cast(b, x.dtype) + if len(x.shape) == 2: + return x + b + else: + return x + tf.reshape(b, [1, -1, 1, 1]) + +#---------------------------------------------------------------------------- +# Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16. + +def leaky_relu(x, alpha=0.2): + with tf.name_scope('LeakyRelu'): + alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') + return tf.maximum(x * alpha, x) + +#---------------------------------------------------------------------------- +# Nearest-neighbor upscaling layer. + +def upscale2d(x, factor=2): + assert isinstance(factor, int) and factor >= 1 + if factor == 1: return x + with tf.variable_scope('Upscale2D'): + s = x.shape + x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) + x = tf.tile(x, [1, 1, 1, factor, 1, factor]) + x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) + return x + +#---------------------------------------------------------------------------- +# Fused upscale2d + conv2d. +# Faster and uses less memory than performing the operations separately. + +def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): + assert kernel >= 1 and kernel % 2 == 1 + w = get_weight([kernel, kernel, fmaps, x.shape[1].value], gain=gain, use_wscale=use_wscale, fan_in=(kernel**2)*x.shape[1].value) + w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') + w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) + w = tf.cast(w, x.dtype) + os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] + return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Box filter downscaling layer. + +def downscale2d(x, factor=2): + assert isinstance(factor, int) and factor >= 1 + if factor == 1: return x + with tf.variable_scope('Downscale2D'): + ksize = [1, 1, factor, factor] + return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') # NOTE: requires tf_config['graph_options.place_pruned_graph'] = True + +#---------------------------------------------------------------------------- +# Fused conv2d + downscale2d. +# Faster and uses less memory than performing the operations separately. + +def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): + assert kernel >= 1 and kernel % 2 == 1 + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) + w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') + w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 + w = tf.cast(w, x.dtype) + return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Pixelwise feature vector normalization. + +def pixel_norm(x, epsilon=1e-8): + with tf.variable_scope('PixelNorm'): + return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) + +#---------------------------------------------------------------------------- +# Minibatch standard deviation. + +def minibatch_stddev_layer(x, group_size=4): + with tf.variable_scope('MinibatchStddev'): + group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. + s = x.shape # [NCHW] Input shape. 
+ y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) # [GMCHW] Split minibatch into M groups of size G. + y = tf.cast(y, tf.float32) # [GMCHW] Cast to FP32. + y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMCHW] Subtract mean over group. + y = tf.reduce_mean(tf.square(y), axis=0) # [MCHW] Calc variance over group. + y = tf.sqrt(y + 1e-8) # [MCHW] Calc stddev over group. + y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) # [M111] Take average over fmaps and pixels. + y = tf.cast(y, x.dtype) # [M111] Cast back to original data type. + y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [N1HW] Replicate over group and pixels. + return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. + +#---------------------------------------------------------------------------- +# Generator network used in the paper. + +def G_paper( + latents_in, # First input: Latent vectors [minibatch, latent_size]. + labels_in, # Second input: Labels [minibatch, label_size]. + num_channels = 1, # Number of output color channels. Overridden based on dataset. + resolution = 32, # Output resolution. Overridden based on dataset. + label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. + fmap_base = 8192, # Overall multiplier for the number of feature maps. + fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. + fmap_max = 512, # Maximum number of feature maps in any layer. + latent_size = None, # Dimensionality of the latent vectors. None = min(fmap_base, fmap_max). + normalize_latents = True, # Normalize latent vectors before feeding them to the network? + use_wscale = True, # Enable equalized learning rate? + use_pixelnorm = True, # Enable pixelwise feature vector normalization? + pixelnorm_epsilon = 1e-8, # Constant epsilon for pixelwise feature vector normalization. + use_leakyrelu = True, # True = leaky ReLU, False = ReLU. + dtype = 'float32', # Data type to use for activations and outputs. + fused_scale = True, # True = use fused upscale2d + conv2d, False = separate upscale2d layers. + structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically. + is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. + **kwargs): # Ignore unrecognized keyword args. + + resolution_log2 = int(np.log2(resolution)) + assert resolution == 2**resolution_log2 and resolution >= 4 + def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) + def PN(x): return pixel_norm(x, epsilon=pixelnorm_epsilon) if use_pixelnorm else x + if latent_size is None: latent_size = nf(0) + if structure is None: structure = 'linear' if is_template_graph else 'recursive' + act = leaky_relu if use_leakyrelu else tf.nn.relu + + latents_in.set_shape([None, latent_size]) + labels_in.set_shape([None, label_size]) + combo_in = tf.cast(tf.concat([latents_in, labels_in], axis=1), dtype) + lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) + + # Building blocks. 
+ def block(x, res): # res = 2..resolution_log2 + with tf.variable_scope('%dx%d' % (2**res, 2**res)): + if res == 2: # 4x4 + if normalize_latents: x = pixel_norm(x, epsilon=pixelnorm_epsilon) + with tf.variable_scope('Dense'): + x = dense(x, fmaps=nf(res-1)*16, gain=np.sqrt(2)/4, use_wscale=use_wscale) # override gain to match the original Theano implementation + x = tf.reshape(x, [-1, nf(res-1), 4, 4]) + x = PN(act(apply_bias(x))) + with tf.variable_scope('Conv'): + x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + else: # 8x8 and up + if fused_scale: + with tf.variable_scope('Conv0_up'): + x = PN(act(apply_bias(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + else: + x = upscale2d(x) + with tf.variable_scope('Conv0'): + x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + with tf.variable_scope('Conv1'): + x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + return x + def torgb(x, res): # res = 2..resolution_log2 + lod = resolution_log2 - res + with tf.variable_scope('ToRGB_lod%d' % lod): + return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) + + # Linear structure: simple but inefficient. + if structure == 'linear': + x = block(combo_in, 2) + images_out = torgb(x, 2) + for res in range(3, resolution_log2 + 1): + lod = resolution_log2 - res + x = block(x, res) + img = torgb(x, res) + images_out = upscale2d(images_out) + with tf.variable_scope('Grow_lod%d' % lod): + images_out = lerp_clip(img, images_out, lod_in - lod) + + # Recursive structure: complex but efficient. + if structure == 'recursive': + def grow(x, res, lod): + y = block(x, res) + img = lambda: upscale2d(torgb(y, res), 2**lod) + if res > 2: img = cset(img, (lod_in > lod), lambda: upscale2d(lerp(torgb(y, res), upscale2d(torgb(x, res - 1)), lod_in - lod), 2**lod)) + if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) + return img() + images_out = grow(combo_in, 2, resolution_log2 - 2) + + assert images_out.dtype == tf.as_dtype(dtype) + images_out = tf.identity(images_out, name='images_out') + return images_out + +#---------------------------------------------------------------------------- +# Discriminator network used in the paper. + +def D_paper( + images_in, # Input: Images [minibatch, channel, height, width]. + num_channels = 1, # Number of input color channels. Overridden based on dataset. + resolution = 32, # Input resolution. Overridden based on dataset. + label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. + fmap_base = 8192, # Overall multiplier for the number of feature maps. + fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. + fmap_max = 512, # Maximum number of feature maps in any layer. + use_wscale = True, # Enable equalized learning rate? + mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. + dtype = 'float32', # Data type to use for activations and outputs. + fused_scale = True, # True = use fused conv2d + downscale2d, False = separate downscale2d layers. + structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically + is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. + **kwargs): # Ignore unrecognized keyword args. 
+ + resolution_log2 = int(np.log2(resolution)) + assert resolution == 2**resolution_log2 and resolution >= 4 + def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) + if structure is None: structure = 'linear' if is_template_graph else 'recursive' + act = leaky_relu + + images_in.set_shape([None, num_channels, resolution, resolution]) + images_in = tf.cast(images_in, dtype) + lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) + + # Building blocks. + def fromrgb(x, res): # res = 2..resolution_log2 + with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): + return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, use_wscale=use_wscale))) + def block(x, res): # res = 2..resolution_log2 + with tf.variable_scope('%dx%d' % (2**res, 2**res)): + if res >= 3: # 8x8 and up + with tf.variable_scope('Conv0'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))) + if fused_scale: + with tf.variable_scope('Conv1_down'): + x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale))) + else: + with tf.variable_scope('Conv1'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale))) + x = downscale2d(x) + else: # 4x4 + if mbstd_group_size > 1: + x = minibatch_stddev_layer(x, mbstd_group_size) + with tf.variable_scope('Conv'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))) + with tf.variable_scope('Dense0'): + x = act(apply_bias(dense(x, fmaps=nf(res-2), use_wscale=use_wscale))) + with tf.variable_scope('Dense1'): + x = apply_bias(dense(x, fmaps=1+label_size, gain=1, use_wscale=use_wscale)) + return x + + # Linear structure: simple but inefficient. + if structure == 'linear': + img = images_in + x = fromrgb(img, resolution_log2) + for res in range(resolution_log2, 2, -1): + lod = resolution_log2 - res + x = block(x, res) + img = downscale2d(img) + y = fromrgb(img, res - 1) + with tf.variable_scope('Grow_lod%d' % lod): + x = lerp_clip(x, y, lod_in - lod) + combo_out = block(x, 2) + + # Recursive structure: complex but efficient. 
+ if structure == 'recursive': + def grow(res, lod): + x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) + if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) + x = block(x(), res); y = lambda: x + if res > 2: y = cset(y, (lod_in > lod), lambda: lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) + return y() + combo_out = grow(2, resolution_log2 - 2) + + assert combo_out.dtype == tf.as_dtype(dtype) + scores_out = tf.identity(combo_out[:, :1], name='scores_out') + labels_out = tf.identity(combo_out[:, 1:], name='labels_out') + return scores_out, labels_out + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/requirements-pip.txt b/models/pggan_tf_official/requirements-pip.txt new file mode 100644 index 0000000000000000000000000000000000000000..1011905d046613756cdb45702d28479daa4ba742 --- /dev/null +++ b/models/pggan_tf_official/requirements-pip.txt @@ -0,0 +1,10 @@ +numpy>=1.13.3 +scipy>=1.0.0 +tensorflow-gpu>=1.6.0 +moviepy>=0.2.3.2 +Pillow>=3.1.1 +lmdb>=0.93 +opencv-python>=3.4.0.12 +cryptography>=2.1.4 +h5py>=2.7.1 +six>=1.11.0 diff --git a/models/pggan_tf_official/tfutil.py b/models/pggan_tf_official/tfutil.py new file mode 100644 index 0000000000000000000000000000000000000000..cf7ad0ada400aae935759190a6384c5dd8a3fc08 --- /dev/null +++ b/models/pggan_tf_official/tfutil.py @@ -0,0 +1,749 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import os +import sys +import inspect +import importlib +import imp +import numpy as np +from collections import OrderedDict +import tensorflow as tf + +#---------------------------------------------------------------------------- +# Convenience. + +def run(*args, **kwargs): # Run the specified ops in the default session. + return tf.get_default_session().run(*args, **kwargs) + +def is_tf_expression(x): + return isinstance(x, tf.Tensor) or isinstance(x, tf.Variable) or isinstance(x, tf.Operation) + +def shape_to_list(shape): + return [dim.value for dim in shape] + +def flatten(x): + with tf.name_scope('Flatten'): + return tf.reshape(x, [-1]) + +def log2(x): + with tf.name_scope('Log2'): + return tf.log(x) * np.float32(1.0 / np.log(2.0)) + +def exp2(x): + with tf.name_scope('Exp2'): + return tf.exp(x * np.float32(np.log(2.0))) + +def lerp(a, b, t): + with tf.name_scope('Lerp'): + return a + (b - a) * t + +def lerp_clip(a, b, t): + with tf.name_scope('LerpClip'): + return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) + +def absolute_name_scope(scope): # Forcefully enter the specified name scope, ignoring any surrounding scopes. + return tf.name_scope(scope + '/') + +#---------------------------------------------------------------------------- +# Initialize TensorFlow graph and session using good default settings. 
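A small graph-mode sketch of the convenience helpers defined above, assuming TensorFlow 1.x (per requirements-pip.txt) and that tfutil.py is importable as tfutil; lerp_clip() is the same blend used for the Grow_lod transitions in networks.py.

import tensorflow as tf   # TensorFlow 1.x graph mode
import tfutil              # models/pggan_tf_official/tfutil.py, import path assumed

a = tf.constant([0.0, 4.0])
b = tf.constant([8.0, 8.0])

blend  = tfutil.lerp_clip(a, b, tf.constant(1.5))   # t is clipped to [0, 1], so this equals b
halved = tfutil.exp2(tf.constant(-1.0))             # 2 ** -1 = 0.5
n_bits = tfutil.log2(tf.constant(1024.0))           # ~10.0

with tf.Session() as sess:
    print(sess.run([blend, halved, n_bits]))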
+ +def init_tf(config_dict=dict()): + if tf.get_default_session() is None: + tf.set_random_seed(np.random.randint(1 << 31)) + create_session(config_dict, force_as_default=True) + +#---------------------------------------------------------------------------- +# Create tf.Session based on config dict of the form +# {'gpu_options.allow_growth': True} + +def create_session(config_dict=dict(), force_as_default=False): + config = tf.ConfigProto() + for key, value in config_dict.items(): + fields = key.split('.') + obj = config + for field in fields[:-1]: + obj = getattr(obj, field) + setattr(obj, fields[-1], value) + session = tf.Session(config=config) + if force_as_default: + session._default_session = session.as_default() + session._default_session.enforce_nesting = False + session._default_session.__enter__() + return session + +#---------------------------------------------------------------------------- +# Initialize all tf.Variables that have not already been initialized. +# Equivalent to the following, but more efficient and does not bloat the tf graph: +# tf.variables_initializer(tf.report_unitialized_variables()).run() + +def init_uninited_vars(vars=None): + if vars is None: vars = tf.global_variables() + test_vars = []; test_ops = [] + with tf.control_dependencies(None): # ignore surrounding control_dependencies + for var in vars: + assert is_tf_expression(var) + try: + tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0')) + except KeyError: + # Op does not exist => variable may be uninitialized. + test_vars.append(var) + with absolute_name_scope(var.name.split(':')[0]): + test_ops.append(tf.is_variable_initialized(var)) + init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited] + run([var.initializer for var in init_vars]) + +#---------------------------------------------------------------------------- +# Set the values of given tf.Variables. +# Equivalent to the following, but more efficient and does not bloat the tf graph: +# tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()] + +def set_vars(var_to_value_dict): + ops = [] + feed_dict = {} + for var, value in var_to_value_dict.items(): + assert is_tf_expression(var) + try: + setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/setter:0')) # look for existing op + except KeyError: + with absolute_name_scope(var.name.split(':')[0]): + with tf.control_dependencies(None): # ignore surrounding control_dependencies + setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter') # create new setter + ops.append(setter) + feed_dict[setter.op.inputs[1]] = value + run(ops, feed_dict) + +#---------------------------------------------------------------------------- +# Autosummary creates an identity op that internally keeps track of the input +# values and automatically shows up in TensorBoard. The reported value +# represents an average over input components. The average is accumulated +# constantly over time and flushed when save_summaries() is called. +# +# Notes: +# - The output tensor must be used as an input for something else in the +# graph. Otherwise, the autosummary op will not get executed, and the average +# value will not get accumulated. +# - It is perfectly fine to include autosummaries with the same name in +# several places throughout the graph, even if they are executed concurrently. +# - It is ok to also pass in a python scalar or numpy array. 
In this case, it +# is added to the average immediately. + +_autosummary_vars = OrderedDict() # name => [var, ...] +_autosummary_immediate = OrderedDict() # name => update_op, update_value +_autosummary_finalized = False + +def autosummary(name, value): + id = name.replace('/', '_') + if is_tf_expression(value): + with tf.name_scope('summary_' + id), tf.device(value.device): + update_op = _create_autosummary_var(name, value) + with tf.control_dependencies([update_op]): + return tf.identity(value) + else: # python scalar or numpy array + if name not in _autosummary_immediate: + with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None): + update_value = tf.placeholder(tf.float32) + update_op = _create_autosummary_var(name, update_value) + _autosummary_immediate[name] = update_op, update_value + update_op, update_value = _autosummary_immediate[name] + run(update_op, {update_value: np.float32(value)}) + return value + +# Create the necessary ops to include autosummaries in TensorBoard report. +# Note: This should be done only once per graph. +def finalize_autosummaries(): + global _autosummary_finalized + if _autosummary_finalized: + return + _autosummary_finalized = True + init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars]) + with tf.device(None), tf.control_dependencies(None): + for name, vars in _autosummary_vars.items(): + id = name.replace('/', '_') + with absolute_name_scope('Autosummary/' + id): + sum = tf.add_n(vars) + avg = sum[0] / sum[1] + with tf.control_dependencies([avg]): # read before resetting + reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars] + with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting + tf.summary.scalar(name, avg) + +# Internal helper for creating autosummary accumulators. +def _create_autosummary_var(name, value_expr): + assert not _autosummary_finalized + v = tf.cast(value_expr, tf.float32) + if v.shape.ndims is 0: + v = [v, np.float32(1.0)] + elif v.shape.ndims is 1: + v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)] + else: + v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))] + v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2)) + with tf.control_dependencies(None): + var = tf.Variable(tf.zeros(2)) # [numerator, denominator] + update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v)) + if name in _autosummary_vars: + _autosummary_vars[name].append(var) + else: + _autosummary_vars[name] = [var] + return update_op + +#---------------------------------------------------------------------------- +# Call filewriter.add_summary() with all summaries in the default graph, +# automatically finalizing and merging them on the first call. + +_summary_merge_op = None + +def save_summaries(filewriter, global_step=None): + global _summary_merge_op + if _summary_merge_op is None: + finalize_autosummaries() + with tf.device(None), tf.control_dependencies(None): + _summary_merge_op = tf.summary.merge_all() + filewriter.add_summary(_summary_merge_op.eval(), global_step) + +#---------------------------------------------------------------------------- +# Utilities for importing modules and objects by name. 
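As described above, autosummary() accumulates a running [sum, count] pair per name and save_summaries() flushes the averages to TensorBoard. A sketch of the python-scalar path, assuming TensorFlow 1.x and an importable tfutil; the loss values and log directory are placeholders.

import tensorflow as tf   # TensorFlow 1.x
import tfutil              # models/pggan_tf_official/tfutil.py, import path assumed

tfutil.init_tf()                                  # create the default graph/session
for loss in [0.9, 0.7, 0.8]:
    tfutil.autosummary('Loss/train', loss)        # immediate path: accumulates into [sum, count]

writer = tf.summary.FileWriter('logs')            # placeholder log directory
tfutil.save_summaries(writer, global_step=3)      # writes the running average (0.8) and resets it
writer.close()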
+ +def import_module(module_or_obj_name): + parts = module_or_obj_name.split('.') + parts[0] = {'np': 'numpy', 'tf': 'tensorflow'}.get(parts[0], parts[0]) + for i in range(len(parts), 0, -1): + try: + module = importlib.import_module('.'.join(parts[:i])) + relative_obj_name = '.'.join(parts[i:]) + return module, relative_obj_name + except ImportError: + pass + raise ImportError(module_or_obj_name) + +def find_obj_in_module(module, relative_obj_name): + obj = module + for part in relative_obj_name.split('.'): + obj = getattr(obj, part) + return obj + +def import_obj(obj_name): + module, relative_obj_name = import_module(obj_name) + return find_obj_in_module(module, relative_obj_name) + +def call_func_by_name(*args, func=None, **kwargs): + assert func is not None + return import_obj(func)(*args, **kwargs) + +#---------------------------------------------------------------------------- +# Wrapper for tf.train.Optimizer that automatically takes care of: +# - Gradient averaging for multi-GPU training. +# - Dynamic loss scaling and typecasts for FP16 training. +# - Ignoring corrupted gradients that contain NaNs/Infs. +# - Reporting statistics. +# - Well-chosen default settings. + +class Optimizer: + def __init__( + self, + name = 'Train', + tf_optimizer = 'tf.train.AdamOptimizer', + learning_rate = 0.001, + use_loss_scaling = False, + loss_scaling_init = 64.0, + loss_scaling_inc = 0.0005, + loss_scaling_dec = 1.0, + **kwargs): + + # Init fields. + self.name = name + self.learning_rate = tf.convert_to_tensor(learning_rate) + self.id = self.name.replace('/', '.') + self.scope = tf.get_default_graph().unique_name(self.id) + self.optimizer_class = import_obj(tf_optimizer) + self.optimizer_kwargs = dict(kwargs) + self.use_loss_scaling = use_loss_scaling + self.loss_scaling_init = loss_scaling_init + self.loss_scaling_inc = loss_scaling_inc + self.loss_scaling_dec = loss_scaling_dec + self._grad_shapes = None # [shape, ...] + self._dev_opt = OrderedDict() # device => optimizer + self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...] + self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor) + self._updates_applied = False + + # Register the gradients of the given loss function with respect to the given variables. + # Intended to be called once per GPU. + def register_gradients(self, loss, vars): + assert not self._updates_applied + + # Validate arguments. + if isinstance(vars, dict): + vars = list(vars.values()) # allow passing in Network.trainables as vars + assert isinstance(vars, list) and len(vars) >= 1 + assert all(is_tf_expression(expr) for expr in vars + [loss]) + if self._grad_shapes is None: + self._grad_shapes = [shape_to_list(var.shape) for var in vars] + assert len(vars) == len(self._grad_shapes) + assert all(shape_to_list(var.shape) == var_shape for var, var_shape in zip(vars, self._grad_shapes)) + dev = loss.device + assert all(var.device == dev for var in vars) + + # Register device and compute gradients. 
+ with tf.name_scope(self.id + '_grad'), tf.device(dev): + if dev not in self._dev_opt: + opt_name = self.scope.replace('/', '_') + '_opt%d' % len(self._dev_opt) + self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) + self._dev_grads[dev] = [] + loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) + grads = self._dev_opt[dev].compute_gradients(loss, vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage + grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros + self._dev_grads[dev].append(grads) + + # Construct training op to update the registered variables based on their gradients. + def apply_updates(self): + assert not self._updates_applied + self._updates_applied = True + devices = list(self._dev_grads.keys()) + total_grads = sum(len(grads) for grads in self._dev_grads.values()) + assert len(devices) >= 1 and total_grads >= 1 + ops = [] + with absolute_name_scope(self.scope): + + # Cast gradients to FP32 and calculate partial sum within each device. + dev_grads = OrderedDict() # device => [(grad, var), ...] + for dev_idx, dev in enumerate(devices): + with tf.name_scope('ProcessGrads%d' % dev_idx), tf.device(dev): + sums = [] + for gv in zip(*self._dev_grads[dev]): + assert all(v is gv[0][1] for g, v in gv) + g = [tf.cast(g, tf.float32) for g, v in gv] + g = g[0] if len(g) == 1 else tf.add_n(g) + sums.append((g, gv[0][1])) + dev_grads[dev] = sums + + # Sum gradients across devices. + if len(devices) > 1: + with tf.name_scope('SumAcrossGPUs'), tf.device(None): + for var_idx, grad_shape in enumerate(self._grad_shapes): + g = [dev_grads[dev][var_idx][0] for dev in devices] + if np.prod(grad_shape): # nccl does not support zero-sized tensors + g = tf.contrib.nccl.all_sum(g) + for dev, gg in zip(devices, g): + dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1]) + + # Apply updates separately on each device. + for dev_idx, (dev, grads) in enumerate(dev_grads.items()): + with tf.name_scope('ApplyGrads%d' % dev_idx), tf.device(dev): + + # Scale gradients as needed. + if self.use_loss_scaling or total_grads > 1: + with tf.name_scope('Scale'): + coef = tf.constant(np.float32(1.0 / total_grads), name='coef') + coef = self.undo_loss_scaling(coef) + grads = [(g * coef, v) for g, v in grads] + + # Check for overflows. + with tf.name_scope('CheckOverflow'): + grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads])) + + # Update weights and adjust loss scaling. + with tf.name_scope('UpdateWeights'): + opt = self._dev_opt[dev] + ls_var = self.get_loss_scaling_var(dev) + if not self.use_loss_scaling: + ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op)) + else: + ops.append(tf.cond(grad_ok, + lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)), + lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec)))) + + # Report statistics on the last device. + if dev == devices[-1]: + with tf.name_scope('Statistics'): + ops.append(autosummary(self.id + '/learning_rate', self.learning_rate)) + ops.append(autosummary(self.id + '/overflow_frequency', tf.where(grad_ok, 0, 1))) + if self.use_loss_scaling: + ops.append(autosummary(self.id + '/loss_scaling_log2', ls_var)) + + # Initialize variables and group everything into a single op. 
+ self.reset_optimizer_state() + init_uninited_vars(list(self._dev_ls_var.values())) + return tf.group(*ops, name='TrainingOp') + + # Reset internal state of the underlying optimizer. + def reset_optimizer_state(self): + run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()]) + + # Get or create variable representing log2 of the current dynamic loss scaling factor. + def get_loss_scaling_var(self, device): + if not self.use_loss_scaling: + return None + if device not in self._dev_ls_var: + with absolute_name_scope(self.scope + '/LossScalingVars'), tf.control_dependencies(None): + self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name='loss_scaling_var') + return self._dev_ls_var[device] + + # Apply dynamic loss scaling for the given expression. + def apply_loss_scaling(self, value): + assert is_tf_expression(value) + if not self.use_loss_scaling: + return value + return value * exp2(self.get_loss_scaling_var(value.device)) + + # Undo the effect of dynamic loss scaling for the given expression. + def undo_loss_scaling(self, value): + assert is_tf_expression(value) + if not self.use_loss_scaling: + return value + return value * exp2(-self.get_loss_scaling_var(value.device)) + +#---------------------------------------------------------------------------- +# Generic network abstraction. +# +# Acts as a convenience wrapper for a parameterized network construction +# function, providing several utility methods and convenient access to +# the inputs/outputs/weights. +# +# Network objects can be safely pickled and unpickled for long-term +# archival purposes. The pickling works reliably as long as the underlying +# network construction function is defined in a standalone Python module +# that has no side effects or application-specific imports. + +network_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import. +_network_import_modules = [] # Temporary modules create during pickle import. + +class Network: + def __init__(self, + name=None, # Network name. Used to select TensorFlow name and variable scopes. + func=None, # Fully qualified name of the underlying network construction function. + **static_kwargs): # Keyword arguments to be passed in to the network construction function. + + self._init_fields() + self.name = name + self.static_kwargs = dict(static_kwargs) + + # Init build func. + module, self._build_func_name = import_module(func) + self._build_module_src = inspect.getsource(module) + self._build_func = find_obj_in_module(module, self._build_func_name) + + # Init graph. + self._init_graph() + self.reset_vars() + + def _init_fields(self): + self.name = None # User-specified name, defaults to build func name if None. + self.scope = None # Unique TF graph scope, derived from the user-specified name. + self.static_kwargs = dict() # Arguments passed to the user-supplied build func. + self.num_inputs = 0 # Number of input tensors. + self.num_outputs = 0 # Number of output tensors. + self.input_shapes = [[]] # Input tensor shapes (NC or NCHW), including minibatch dimension. + self.output_shapes = [[]] # Output tensor shapes (NC or NCHW), including minibatch dimension. + self.input_shape = [] # Short-hand for input_shapes[0]. + self.output_shape = [] # Short-hand for output_shapes[0]. + self.input_templates = [] # Input placeholders in the template graph. + self.output_templates = [] # Output tensors in the template graph. + self.input_names = [] # Name string for each input. 
+ self.output_names = [] # Name string for each output. + self.vars = OrderedDict() # All variables (localname => var). + self.trainables = OrderedDict() # Trainable variables (localname => var). + self._build_func = None # User-supplied build function that constructs the network. + self._build_func_name = None # Name of the build function. + self._build_module_src = None # Full source code of the module containing the build function. + self._run_cache = dict() # Cached graph data for Network.run(). + + def _init_graph(self): + # Collect inputs. + self.input_names = [] + for param in inspect.signature(self._build_func).parameters.values(): + if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty: + self.input_names.append(param.name) + self.num_inputs = len(self.input_names) + assert self.num_inputs >= 1 + + # Choose name and scope. + if self.name is None: + self.name = self._build_func_name + self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False) + + # Build template graph. + with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE): + assert tf.get_variable_scope().name == self.scope + with absolute_name_scope(self.scope): # ignore surrounding name_scope + with tf.control_dependencies(None): # ignore surrounding control_dependencies + self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names] + out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs) + + # Collect outputs. + assert is_tf_expression(out_expr) or isinstance(out_expr, tuple) + self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr) + self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates] + self.num_outputs = len(self.output_templates) + assert self.num_outputs >= 1 + + # Populate remaining fields. + self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates] + self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates] + self.input_shape = self.input_shapes[0] + self.output_shape = self.output_shapes[0] + self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')]) + self.trainables = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')]) + + # Run initializers for all variables defined by this network. + def reset_vars(self): + run([var.initializer for var in self.vars.values()]) + + # Run initializers for all trainable variables defined by this network. + def reset_trainables(self): + run([var.initializer for var in self.trainables.values()]) + + # Get TensorFlow expression(s) for the output(s) of this network, given the inputs. + def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs): + assert len(in_expr) == self.num_inputs + all_kwargs = dict(self.static_kwargs) + all_kwargs.update(dynamic_kwargs) + with tf.variable_scope(self.scope, reuse=True): + assert tf.get_variable_scope().name == self.scope + named_inputs = [tf.identity(expr, name=name) for expr, name in zip(in_expr, self.input_names)] + out_expr = self._build_func(*named_inputs, **all_kwargs) + assert is_tf_expression(out_expr) or isinstance(out_expr, tuple) + if return_as_list: + out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr) + return out_expr + + # Get the local name of a given variable, excluding any surrounding name scopes. 
+ def get_var_localname(self, var_or_globalname): + assert is_tf_expression(var_or_globalname) or isinstance(var_or_globalname, str) + globalname = var_or_globalname if isinstance(var_or_globalname, str) else var_or_globalname.name + assert globalname.startswith(self.scope + '/') + localname = globalname[len(self.scope) + 1:] + localname = localname.split(':')[0] + return localname + + # Find variable by local or global name. + def find_var(self, var_or_localname): + assert is_tf_expression(var_or_localname) or isinstance(var_or_localname, str) + return self.vars[var_or_localname] if isinstance(var_or_localname, str) else var_or_localname + + # Get the value of a given variable as NumPy array. + # Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible. + def get_var(self, var_or_localname): + return self.find_var(var_or_localname).eval() + + # Set the value of a given variable based on the given NumPy array. + # Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible. + def set_var(self, var_or_localname, new_value): + return set_vars({self.find_var(var_or_localname): new_value}) + + # Pickle export. + def __getstate__(self): + return { + 'version': 2, + 'name': self.name, + 'static_kwargs': self.static_kwargs, + 'build_module_src': self._build_module_src, + 'build_func_name': self._build_func_name, + 'variables': list(zip(self.vars.keys(), run(list(self.vars.values()))))} + + # Pickle import. + def __setstate__(self, state): + self._init_fields() + + # Execute custom import handlers. + for handler in network_import_handlers: + state = handler(state) + + # Set basic fields. + assert state['version'] == 2 + self.name = state['name'] + self.static_kwargs = state['static_kwargs'] + self._build_module_src = state['build_module_src'] + self._build_func_name = state['build_func_name'] + + # Parse imported module. + module = imp.new_module('_tfutil_network_import_module_%d' % len(_network_import_modules)) + exec(self._build_module_src, module.__dict__) + self._build_func = find_obj_in_module(module, self._build_func_name) + _network_import_modules.append(module) # avoid gc + + # Init graph. + self._init_graph() + self.reset_vars() + set_vars({self.find_var(name): value for name, value in state['variables']}) + + # Create a clone of this network with its own copy of the variables. + def clone(self, name=None): + net = object.__new__(Network) + net._init_fields() + net.name = name if name is not None else self.name + net.static_kwargs = dict(self.static_kwargs) + net._build_module_src = self._build_module_src + net._build_func_name = self._build_func_name + net._build_func = self._build_func + net._init_graph() + net.copy_vars_from(self) + return net + + # Copy the values of all variables from the given network. + def copy_vars_from(self, src_net): + assert isinstance(src_net, Network) + name_to_value = run({name: src_net.find_var(name) for name in self.vars.keys()}) + set_vars({self.find_var(name): value for name, value in name_to_value.items()}) + + # Copy the values of all trainable variables from the given network. + def copy_trainables_from(self, src_net): + assert isinstance(src_net, Network) + name_to_value = run({name: src_net.find_var(name) for name in self.trainables.keys()}) + set_vars({self.find_var(name): value for name, value in name_to_value.items()}) + + # Create new network with the given parameters, and copy all variables from this network. 
+ def convert(self, name=None, func=None, **static_kwargs): + net = Network(name, func, **static_kwargs) + net.copy_vars_from(self) + return net + + # Construct a TensorFlow op that updates the variables of this network + # to be slightly closer to those of the given network. + def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0): + assert isinstance(src_net, Network) + with absolute_name_scope(self.scope): + with tf.name_scope('MovingAvg'): + ops = [] + for name, var in self.vars.items(): + if name in src_net.vars: + cur_beta = beta if name in self.trainables else beta_nontrainable + new_value = lerp(src_net.vars[name], var, cur_beta) + ops.append(var.assign(new_value)) + return tf.group(*ops) + + # Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s). + def run(self, *in_arrays, + return_as_list = False, # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs. + print_progress = False, # Print progress to the console? Useful for very large input arrays. + minibatch_size = None, # Maximum minibatch size to use, None = disable batching. + num_gpus = 1, # Number of GPUs to use. + out_mul = 1.0, # Multiplicative constant to apply to the output(s). + out_add = 0.0, # Additive constant to apply to the output(s). + out_shrink = 1, # Shrink the spatial dimensions of the output(s) by the given factor. + out_dtype = None, # Convert the output to the specified data type. + **dynamic_kwargs): # Additional keyword arguments to pass into the network construction function. + + assert len(in_arrays) == self.num_inputs + num_items = in_arrays[0].shape[0] + if minibatch_size is None: + minibatch_size = num_items + key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype]) + + # Build graph. + if key not in self._run_cache: + with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None): + in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates])) + out_split = [] + for gpu in range(num_gpus): + with tf.device('/gpu:%d' % gpu): + out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs) + if out_mul != 1.0: + out_expr = [x * out_mul for x in out_expr] + if out_add != 0.0: + out_expr = [x + out_add for x in out_expr] + if out_shrink > 1: + ksize = [1, 1, out_shrink, out_shrink] + out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr] + if out_dtype is not None: + if tf.as_dtype(out_dtype).is_integer: + out_expr = [tf.round(x) for x in out_expr] + out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr] + out_split.append(out_expr) + self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)] + + # Run minibatches. + out_expr = self._run_cache[key] + out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr] + for mb_begin in range(0, num_items, minibatch_size): + if print_progress: + print('\r%d / %d' % (mb_begin, num_items), end='') + mb_end = min(mb_begin + minibatch_size, num_items) + mb_in = [src[mb_begin : mb_end] for src in in_arrays] + mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in))) + for dst, src in zip(out_arrays, mb_out): + dst[mb_begin : mb_end] = src + + # Done. 
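`setup_as_moving_average_of()` above nudges each variable of this network toward the corresponding variable of `src_net` via `lerp(src, var, beta)`, so the tracked copy keeps a fraction `beta` of its own value and absorbs `1 - beta` of the source per update. A NumPy sketch of one update step, assuming the usual `lerp(a, b, t) = a + (b - a) * t` convention and toy weights:

```python
import numpy as np

def lerp(a, b, t):
    return a + (b - a) * t  # assumed tfutil.lerp convention

g_w = np.array([1.0, 2.0])    # current generator weight (src_net)
gs_w = np.array([0.0, 0.0])   # running-average copy (this network)
beta = 0.999
gs_w = lerp(g_w, gs_w, beta)  # keep 99.9% of Gs, absorb 0.1% of G
print(gs_w)                   # [0.001 0.002]
```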
+ if print_progress: + print('\r%d / %d' % (num_items, num_items)) + if not return_as_list: + out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays) + return out_arrays + + # Returns a list of (name, output_expr, trainable_vars) tuples corresponding to + # individual layers of the network. Mainly intended to be used for reporting. + def list_layers(self): + patterns_to_ignore = ['/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast', '/concat'] + all_ops = tf.get_default_graph().get_operations() + all_ops = [op for op in all_ops if not any(p in op.name for p in patterns_to_ignore)] + layers = [] + + def recurse(scope, parent_ops, level): + prefix = scope + '/' + ops = [op for op in parent_ops if op.name == scope or op.name.startswith(prefix)] + + # Does not contain leaf nodes => expand immediate children. + if level == 0 or all('/' in op.name[len(prefix):] for op in ops): + visited = set() + for op in ops: + suffix = op.name[len(prefix):] + if '/' in suffix: + suffix = suffix[:suffix.index('/')] + if suffix not in visited: + recurse(prefix + suffix, ops, level + 1) + visited.add(suffix) + + # Otherwise => interpret as a layer. + else: + layer_name = scope[len(self.scope)+1:] + layer_output = ops[-1].outputs[0] + layer_trainables = [op.outputs[0] for op in ops if op.type.startswith('Variable') and self.get_var_localname(op.name) in self.trainables] + layers.append((layer_name, layer_output, layer_trainables)) + + recurse(self.scope, all_ops, 0) + return layers + + # Print a summary table of the network structure. + def print_layers(self, title=None, hide_layers_with_no_params=False): + if title is None: title = self.name + print() + print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape')) + print('%-28s%-12s%-24s%-24s' % (('---',) * 4)) + + total_params = 0 + for layer_name, layer_output, layer_trainables in self.list_layers(): + weights = [var for var in layer_trainables if var.name.endswith('/weight:0')] + num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables) + total_params += num_params + if hide_layers_with_no_params and num_params == 0: + continue + + print('%-28s%-12s%-24s%-24s' % ( + layer_name, + num_params if num_params else '-', + layer_output.shape, + weights[0].shape if len(weights) == 1 else '-')) + + print('%-28s%-12s%-24s%-24s' % (('---',) * 4)) + print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', '')) + print() + + # Construct summary ops to include histograms of all trainable parameters in TensorBoard. + def setup_weight_histograms(self, title=None): + if title is None: title = self.name + with tf.name_scope(None), tf.device(None), tf.control_dependencies(None): + for localname, var in self.trainables.items(): + if '/' in localname: + p = localname.split('/') + name = title + '_' + p[-1] + '/' + '_'.join(p[:-1]) + else: + name = title + '_toplevel/' + localname + tf.summary.histogram(name, var) + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/train.py b/models/pggan_tf_official/train.py new file mode 100644 index 0000000000000000000000000000000000000000..1e864246928be08d8f8167fa43a7accff04fdc0e --- /dev/null +++ b/models/pggan_tf_official/train.py @@ -0,0 +1,288 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. 
To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import os +import time +import numpy as np +import tensorflow as tf + +import config +import tfutil +import dataset +import misc + +#---------------------------------------------------------------------------- +# Choose the size and contents of the image snapshot grids that are exported +# periodically during training. + +def setup_snapshot_image_grid(G, training_set, + size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display. + layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label. + + # Select size. + gw = 1; gh = 1 + if size == '1080p': + gw = np.clip(1920 // G.output_shape[3], 3, 32) + gh = np.clip(1080 // G.output_shape[2], 2, 32) + if size == '4k': + gw = np.clip(3840 // G.output_shape[3], 7, 32) + gh = np.clip(2160 // G.output_shape[2], 4, 32) + + # Fill in reals and labels. + reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype) + labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype) + for idx in range(gw * gh): + x = idx % gw; y = idx // gw + while True: + real, label = training_set.get_minibatch_np(1) + if layout == 'row_per_class' and training_set.label_size > 0: + if label[0, y % training_set.label_size] == 0.0: + continue + reals[idx] = real[0] + labels[idx] = label[0] + break + + # Generate latents. + latents = misc.random_latents(gw * gh, G) + return (gw, gh), reals, labels, latents + +#---------------------------------------------------------------------------- +# Just-in-time processing of training images before feeding them to the networks. + +def process_reals(x, lod, mirror_augment, drange_data, drange_net): + with tf.name_scope('ProcessReals'): + with tf.name_scope('DynamicRange'): + x = tf.cast(x, tf.float32) + x = misc.adjust_dynamic_range(x, drange_data, drange_net) + if mirror_augment: + with tf.name_scope('MirrorAugment'): + s = tf.shape(x) + mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0) + mask = tf.tile(mask, [1, s[1], s[2], s[3]]) + x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3])) + with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail. + s = tf.shape(x) + y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2]) + y = tf.reduce_mean(y, axis=[3, 5], keepdims=True) + y = tf.tile(y, [1, 1, 1, 2, 1, 2]) + y = tf.reshape(y, [-1, s[1], s[2], s[3]]) + x = tfutil.lerp(x, y, lod - tf.floor(lod)) + with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks. + s = tf.shape(x) + factor = tf.cast(2 ** tf.floor(lod), tf.int32) + x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) + x = tf.tile(x, [1, 1, 1, factor, 1, factor]) + x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) + return x + +#---------------------------------------------------------------------------- +# Class for evaluating and storing the values of time-varying training parameters. + +class TrainingSchedule: + def __init__( + self, + cur_nimg, + training_set, + lod_initial_resolution = 4, # Image resolution used at the beginning. + lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution. + lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers. + minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs. 
+ minibatch_dict = {}, # Resolution-specific overrides. + max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU. + G_lrate_base = 0.001, # Learning rate for the generator. + G_lrate_dict = {}, # Resolution-specific overrides. + D_lrate_base = 0.001, # Learning rate for the discriminator. + D_lrate_dict = {}, # Resolution-specific overrides. + tick_kimg_base = 160, # Default interval of progress snapshots. + tick_kimg_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:20, 1024:10}): # Resolution-specific overrides. + + # Training phase. + self.kimg = cur_nimg / 1000.0 + phase_dur = lod_training_kimg + lod_transition_kimg + phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0 + phase_kimg = self.kimg - phase_idx * phase_dur + + # Level-of-detail and resolution. + self.lod = training_set.resolution_log2 + self.lod -= np.floor(np.log2(lod_initial_resolution)) + self.lod -= phase_idx + if lod_transition_kimg > 0: + self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg + self.lod = max(self.lod, 0.0) + self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod))) + + # Minibatch size. + self.minibatch = minibatch_dict.get(self.resolution, minibatch_base) + self.minibatch -= self.minibatch % config.num_gpus + if self.resolution in max_minibatch_per_gpu: + self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus) + + # Other parameters. + self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base) + self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base) + self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base) + +#---------------------------------------------------------------------------- +# Main training script. +# To run, comment/uncomment appropriate lines in config.py and launch train.py. + +def train_progressive_gan( + G_smoothing = 0.999, # Exponential running average of generator weights. + D_repeats = 1, # How many times the discriminator is trained per G iteration. + minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters. + reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced? + total_kimg = 15000, # Total length of the training, measured in thousands of real images. + mirror_augment = False, # Enable mirror augment? + drange_net = [-1,1], # Dynamic range used when feeding image data to the networks. + image_snapshot_ticks = 1, # How often to export image snapshots? + network_snapshot_ticks = 10, # How often to export network snapshots? + save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file? + save_weight_histograms = False, # Include weight histograms in the tfevents file? + resume_run_id = None, # Run ID or network pkl to resume training from, None = start from scratch. + resume_snapshot = None, # Snapshot index to resume training from, None = autodetect. + resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule. + resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting. + + maintenance_start_time = time.time() + training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **config.dataset) + + # Construct networks. + with tf.device('/gpu:0'): + if resume_run_id is not None: + network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot) + print('Loading networks from "%s"...' 
% network_pkl) + G, D, Gs = misc.load_pkl(network_pkl) + else: + print('Constructing networks...') + G = tfutil.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.G) + D = tfutil.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.D) + Gs = G.clone('Gs') + Gs_update_op = Gs.setup_as_moving_average_of(G, beta=G_smoothing) + G.print_layers(); D.print_layers() + + print('Building TensorFlow graph...') + with tf.name_scope('Inputs'): + lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[]) + lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[]) + minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[]) + minibatch_split = minibatch_in // config.num_gpus + reals, labels = training_set.get_minibatch_tf() + reals_split = tf.split(reals, config.num_gpus) + labels_split = tf.split(labels, config.num_gpus) + G_opt = tfutil.Optimizer(name='TrainG', learning_rate=lrate_in, **config.G_opt) + D_opt = tfutil.Optimizer(name='TrainD', learning_rate=lrate_in, **config.D_opt) + for gpu in range(config.num_gpus): + with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu): + G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow') + D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow') + lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)] + reals_gpu = process_reals(reals_split[gpu], lod_in, mirror_augment, training_set.dynamic_range, drange_net) + labels_gpu = labels_split[gpu] + with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops): + G_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **config.G_loss) + with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops): + D_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals_gpu, labels=labels_gpu, **config.D_loss) + G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables) + D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables) + G_train_op = G_opt.apply_updates() + D_train_op = D_opt.apply_updates() + + print('Setting up snapshot image grid...') + grid_size, grid_reals, grid_labels, grid_latents = setup_snapshot_image_grid(G, training_set, **config.grid) + sched = TrainingSchedule(total_kimg * 1000, training_set, **config.sched) + grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus) + + print('Setting up result dir...') + result_subdir = misc.create_result_subdir(config.result_dir, config.desc) + misc.save_image_grid(grid_reals, os.path.join(result_subdir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size) + misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % 0), drange=drange_net, grid_size=grid_size) + summary_log = tf.summary.FileWriter(result_subdir) + if save_tf_graph: + summary_log.add_graph(tf.get_default_graph()) + if save_weight_histograms: + G.setup_weight_histograms(); D.setup_weight_histograms() + + print('Training...') + cur_nimg = int(resume_kimg * 1000) + cur_tick = 0 + tick_start_nimg = cur_nimg + tick_start_time = time.time() + train_start_time = tick_start_time - resume_time + prev_lod = -1.0 + while cur_nimg < total_kimg * 1000: + + # Choose training parameters and configure training ops. 
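The loop below re-evaluates `TrainingSchedule` at the top of every iteration; its essential part is the LOD arithmetic that maps the number of shown images to a level of detail and an effective training resolution. A self-contained NumPy sketch of that arithmetic, assuming a 1024x1024 dataset (`resolution_log2 = 10`) and the default 600/600 kimg phases:

```python
import numpy as np

def schedule_lod(kimg, resolution_log2=10, lod_initial_resolution=4,
                 lod_training_kimg=600, lod_transition_kimg=600):
    phase_dur = lod_training_kimg + lod_transition_kimg
    phase_idx = int(np.floor(kimg / phase_dur)) if phase_dur > 0 else 0
    phase_kimg = kimg - phase_idx * phase_dur
    lod = resolution_log2 - np.floor(np.log2(lod_initial_resolution)) - phase_idx
    if lod_transition_kimg > 0:
        lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
    lod = max(lod, 0.0)
    resolution = 2 ** (resolution_log2 - int(np.floor(lod)))
    return float(lod), resolution

print(schedule_lod(0))      # (8.0, 4)    -> training starts at 4x4
print(schedule_lod(900))    # (7.5, 8)    -> halfway through fading in the 8x8 layers
print(schedule_lod(14400))  # (0.0, 1024) -> full resolution reached
```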
+ sched = TrainingSchedule(cur_nimg, training_set, **config.sched) + training_set.configure(sched.minibatch, sched.lod) + if reset_opt_for_new_lod: + if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod): + G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state() + prev_lod = sched.lod + + # Run training ops. + for repeat in range(minibatch_repeats): + for _ in range(D_repeats): + tfutil.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch}) + cur_nimg += sched.minibatch + tfutil.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch}) + + # Perform maintenance tasks once per tick. + done = (cur_nimg >= total_kimg * 1000) + if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done: + cur_tick += 1 + cur_time = time.time() + tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0 + tick_start_nimg = cur_nimg + tick_time = cur_time - tick_start_time + total_time = cur_time - train_start_time + maintenance_time = tick_start_time - maintenance_start_time + maintenance_start_time = cur_time + + # Report progress. + print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %.1f' % ( + tfutil.autosummary('Progress/tick', cur_tick), + tfutil.autosummary('Progress/kimg', cur_nimg / 1000.0), + tfutil.autosummary('Progress/lod', sched.lod), + tfutil.autosummary('Progress/minibatch', sched.minibatch), + misc.format_time(tfutil.autosummary('Timing/total_sec', total_time)), + tfutil.autosummary('Timing/sec_per_tick', tick_time), + tfutil.autosummary('Timing/sec_per_kimg', tick_time / tick_kimg), + tfutil.autosummary('Timing/maintenance_sec', maintenance_time))) + tfutil.autosummary('Timing/total_hours', total_time / (60.0 * 60.0)) + tfutil.autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0)) + tfutil.save_summaries(summary_log, cur_nimg) + + # Save snapshots. + if cur_tick % image_snapshot_ticks == 0 or done: + grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus) + misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size) + if cur_tick % network_snapshot_ticks == 0 or done: + misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000))) + + # Record start time of the next tick. + tick_start_time = time.time() + + # Write final results. + misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-final.pkl')) + summary_log.close() + open(os.path.join(result_subdir, '_training-done.txt'), 'wt').close() + +#---------------------------------------------------------------------------- +# Main entry point. +# Calls the function indicated in config.py. + +if __name__ == "__main__": + misc.init_output_logging() + np.random.seed(config.random_seed) + print('Initializing TensorFlow...') + os.environ.update(config.env) + tfutil.init_tf(config.tf_config) + print('Running %s()...' 
% config.train['func']) + tfutil.call_func_by_name(**config.train) + print('Exiting...') + +#---------------------------------------------------------------------------- diff --git a/models/pggan_tf_official/util_scripts.py b/models/pggan_tf_official/util_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..0fc61d8eb00d604ab078882acb6f4b53374c0c86 --- /dev/null +++ b/models/pggan_tf_official/util_scripts.py @@ -0,0 +1,239 @@ +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +import os +import time +import re +import bisect +from collections import OrderedDict +import numpy as np +import tensorflow as tf +import scipy.ndimage +import scipy.misc + +import config +import misc +import tfutil +import train +import dataset + +#---------------------------------------------------------------------------- +# Generate random images or image grids using a previously trained network. +# To run, uncomment the appropriate line in config.py and launch train.py. + +def generate_fake_images(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=8): + network_pkl = misc.locate_network_pkl(run_id, snapshot) + if png_prefix is None: + png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-' + random_state = np.random.RandomState(random_seed) + + print('Loading network from "%s"...' % network_pkl) + G, D, Gs = misc.load_network_pkl(run_id, snapshot) + + result_subdir = misc.create_result_subdir(config.result_dir, config.desc) + for png_idx in range(num_pngs): + print('Generating png %d / %d...' % (png_idx, num_pngs)) + latents = misc.random_latents(np.prod(grid_size), Gs, random_state=random_state) + labels = np.zeros([latents.shape[0], 0], np.float32) + images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8) + misc.save_image_grid(images, os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)), [0,255], grid_size) + open(os.path.join(result_subdir, '_done.txt'), 'wt').close() + +#---------------------------------------------------------------------------- +# Generate MP4 video of random interpolations using a previously trained network. +# To run, uncomment the appropriate line in config.py and launch train.py. + +def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1, duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M', random_seed=1000, minibatch_size=8): + network_pkl = misc.locate_network_pkl(run_id, snapshot) + if mp4 is None: + mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4' + num_frames = int(np.rint(duration_sec * mp4_fps)) + random_state = np.random.RandomState(random_seed) + + print('Loading network from "%s"...' 
% network_pkl) + G, D, Gs = misc.load_network_pkl(run_id, snapshot) + + print('Generating latent vectors...') + shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component] + all_latents = random_state.randn(*shape).astype(np.float32) + all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap') + all_latents /= np.sqrt(np.mean(np.square(all_latents))) + + # Frame generation func for moviepy. + def make_frame(t): + frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1)) + latents = all_latents[frame_idx] + labels = np.zeros([latents.shape[0], 0], np.float32) + images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8) + grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0) # HWC + if image_zoom > 1: + grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0) + if grid.shape[2] == 1: + grid = grid.repeat(3, 2) # grayscale => RGB + return grid + + # Generate video. + import moviepy.editor # pip install moviepy + result_subdir = misc.create_result_subdir(config.result_dir, config.desc) + moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec='libx264', bitrate=mp4_bitrate) + open(os.path.join(result_subdir, '_done.txt'), 'wt').close() + +#---------------------------------------------------------------------------- +# Generate MP4 video of training progress for a previous training run. +# To run, uncomment the appropriate line in config.py and launch train.py. + +def generate_training_video(run_id, duration_sec=20.0, time_warp=1.5, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M'): + src_result_subdir = misc.locate_result_subdir(run_id) + if mp4 is None: + mp4 = os.path.basename(src_result_subdir) + '-train.mp4' + + # Parse log. + times = [] + snaps = [] # [(png, kimg, lod), ...] + with open(os.path.join(src_result_subdir, 'log.txt'), 'rt') as log: + for line in log: + k = re.search(r'kimg ([\d\.]+) ', line) + l = re.search(r'lod ([\d\.]+) ', line) + t = re.search(r'time (\d+d)? *(\d+h)? *(\d+m)? *(\d+s)? ', line) + if k and l and t: + k = float(k.group(1)) + l = float(l.group(1)) + t = [int(t.group(i)[:-1]) if t.group(i) else 0 for i in range(1, 5)] + t = t[0] * 24*60*60 + t[1] * 60*60 + t[2] * 60 + t[3] + png = os.path.join(src_result_subdir, 'fakes%06d.png' % int(np.floor(k))) + if os.path.isfile(png): + times.append(t) + snaps.append((png, k, l)) + assert len(times) + + # Frame generation func for moviepy. + png_cache = [None, None] # [png, img] + def make_frame(t): + wallclock = ((t / duration_sec) ** time_warp) * times[-1] + png, kimg, lod = snaps[max(bisect.bisect(times, wallclock) - 1, 0)] + if png_cache[0] == png: + img = png_cache[1] + else: + img = scipy.misc.imread(png) + while img.shape[1] > 1920 or img.shape[0] > 1080: + img = img.astype(np.float32).reshape(img.shape[0]//2, 2, img.shape[1]//2, 2, -1).mean(axis=(1,3)) + png_cache[:] = [png, img] + img = misc.draw_text_label(img, 'lod %.2f' % lod, 16, img.shape[0]-4, alignx=0.0, aligny=1.0) + img = misc.draw_text_label(img, misc.format_time(int(np.rint(wallclock))), img.shape[1]//2, img.shape[0]-4, alignx=0.5, aligny=1.0) + img = misc.draw_text_label(img, '%.0f kimg' % kimg, img.shape[1]-16, img.shape[0]-4, alignx=1.0, aligny=1.0) + return img + + # Generate video. 
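`generate_interpolation_video()` above builds its latent path by drawing independent per-frame noise, blurring it along the time axis with a Gaussian kernel, and renormalizing to unit RMS so consecutive frames drift smoothly without losing overall magnitude. The same three steps in isolation (toy sizes; a real run uses `Gs.input_shape[1:]` instead of a flat 512):

```python
import numpy as np
import scipy.ndimage

mp4_fps, duration_sec, smoothing_sec = 30, 2.0, 1.0
num_frames = int(np.rint(duration_sec * mp4_fps))
rnd = np.random.RandomState(1000)

latents = rnd.randn(num_frames, 512).astype(np.float32)           # per-frame noise
latents = scipy.ndimage.gaussian_filter(                           # temporal smoothing
    latents, sigma=[smoothing_sec * mp4_fps, 0], mode='wrap')
latents /= np.sqrt(np.mean(np.square(latents)))                    # unit RMS overall
print(latents.shape, round(float(np.mean(np.square(latents))), 6))  # (60, 512) 1.0
```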
+ import moviepy.editor # pip install moviepy + result_subdir = misc.create_result_subdir(config.result_dir, config.desc) + moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec='libx264', bitrate=mp4_bitrate) + open(os.path.join(result_subdir, '_done.txt'), 'wt').close() + +#---------------------------------------------------------------------------- +# Evaluate one or more metrics for a previous training run. +# To run, uncomment one of the appropriate lines in config.py and launch train.py. + +def evaluate_metrics(run_id, log, metrics, num_images, real_passes, minibatch_size=None): + metric_class_names = { + 'swd': 'metrics.sliced_wasserstein.API', + 'fid': 'metrics.frechet_inception_distance.API', + 'is': 'metrics.inception_score.API', + 'msssim': 'metrics.ms_ssim.API', + } + + # Locate training run and initialize logging. + result_subdir = misc.locate_result_subdir(run_id) + snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False) + assert len(snapshot_pkls) >= 1 + log_file = os.path.join(result_subdir, log) + print('Logging output to', log_file) + misc.set_output_log_file(log_file) + + # Initialize dataset and select minibatch size. + dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(result_subdir, verbose=True, shuffle_mb=0) + if minibatch_size is None: + minibatch_size = np.clip(8192 // dataset_obj.shape[1], 4, 256) + + # Initialize metrics. + metric_objs = [] + for name in metrics: + class_name = metric_class_names.get(name, name) + print('Initializing %s...' % class_name) + class_def = tfutil.import_obj(class_name) + image_shape = [3] + dataset_obj.shape[1:] + obj = class_def(num_images=num_images, image_shape=image_shape, image_dtype=np.uint8, minibatch_size=minibatch_size) + tfutil.init_uninited_vars() + mode = 'warmup' + obj.begin(mode) + for idx in range(10): + obj.feed(mode, np.random.randint(0, 256, size=[minibatch_size]+image_shape, dtype=np.uint8)) + obj.end(mode) + metric_objs.append(obj) + + # Print table header. + print() + print('%-10s%-12s' % ('Snapshot', 'Time_eval'), end='') + for obj in metric_objs: + for name, fmt in zip(obj.get_metric_names(), obj.get_metric_formatting()): + print('%-*s' % (len(fmt % 0), name), end='') + print() + print('%-10s%-12s' % ('---', '---'), end='') + for obj in metric_objs: + for fmt in obj.get_metric_formatting(): + print('%-*s' % (len(fmt % 0), '---'), end='') + print() + + # Feed in reals. + for title, mode in [('Reals', 'reals'), ('Reals2', 'fakes')][:real_passes]: + print('%-10s' % title, end='') + time_begin = time.time() + labels = np.zeros([num_images, dataset_obj.label_size], dtype=np.float32) + [obj.begin(mode) for obj in metric_objs] + for begin in range(0, num_images, minibatch_size): + end = min(begin + minibatch_size, num_images) + images, labels[begin:end] = dataset_obj.get_minibatch_np(end - begin) + if mirror_augment: + images = misc.apply_mirror_augment(images) + if images.shape[1] == 1: + images = np.tile(images, [1, 3, 1, 1]) # grayscale => RGB + [obj.feed(mode, images) for obj in metric_objs] + results = [obj.end(mode) for obj in metric_objs] + print('%-12s' % misc.format_time(time.time() - time_begin), end='') + for obj, vals in zip(metric_objs, results): + for val, fmt in zip(vals, obj.get_metric_formatting()): + print(fmt % val, end='') + print() + + # Evaluate each network snapshot. 
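When no explicit `minibatch_size` is passed, `evaluate_metrics()` above derives one from the dataset resolution (`dataset_obj.shape[1]`): `8192 // resolution`, clipped to `[4, 256]`, so evaluation batches shrink as images grow. A quick check of the heuristic at a few resolutions:

```python
import numpy as np

for resolution in (64, 256, 1024):
    minibatch_size = int(np.clip(8192 // resolution, 4, 256))
    print(resolution, minibatch_size)   # 64 -> 128, 256 -> 32, 1024 -> 8
```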
+ for snapshot_idx, snapshot_pkl in enumerate(reversed(snapshot_pkls)): + prefix = 'network-snapshot-'; postfix = '.pkl' + snapshot_name = os.path.basename(snapshot_pkl) + assert snapshot_name.startswith(prefix) and snapshot_name.endswith(postfix) + snapshot_kimg = int(snapshot_name[len(prefix) : -len(postfix)]) + + print('%-10d' % snapshot_kimg, end='') + mode ='fakes' + [obj.begin(mode) for obj in metric_objs] + time_begin = time.time() + with tf.Graph().as_default(), tfutil.create_session(config.tf_config).as_default(): + G, D, Gs = misc.load_pkl(snapshot_pkl) + for begin in range(0, num_images, minibatch_size): + end = min(begin + minibatch_size, num_images) + latents = misc.random_latents(end - begin, Gs) + images = Gs.run(latents, labels[begin:end], num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_dtype=np.uint8) + if images.shape[1] == 1: + images = np.tile(images, [1, 3, 1, 1]) # grayscale => RGB + [obj.feed(mode, images) for obj in metric_objs] + results = [obj.end(mode) for obj in metric_objs] + print('%-12s' % misc.format_time(time.time() - time_begin), end='') + for obj, vals in zip(metric_objs, results): + for val, fmt in zip(vals, obj.get_metric_formatting()): + print(fmt % val, end='') + print() + print() + +#---------------------------------------------------------------------------- diff --git a/models/pretrain/Pretrained_Models_Should_Be_Placed_Here b/models/pretrain/Pretrained_Models_Should_Be_Placed_Here new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/pretrain/stylegan_ffhq.pth b/models/pretrain/stylegan_ffhq.pth new file mode 100644 index 0000000000000000000000000000000000000000..52e99c34059d5b4330435f678f3d6eff9594d1b8 --- /dev/null +++ b/models/pretrain/stylegan_ffhq.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57fa622002080d771231d7753450b924a3d1fe8796bd176a9d7b405b734bec07 +size 116178586 diff --git a/models/stylegan2_generator.py b/models/stylegan2_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..ec4e805f62ceb87f3ae1bc7b69085701bf423b28 --- /dev/null +++ b/models/stylegan2_generator.py @@ -0,0 +1,189 @@ +# python3.7 +"""Contains the generator class of StyleGAN. + +Basically, this class is derived from the `BaseGenerator` class defined in +`base_generator.py`. +""" + +import os +import numpy as np +import pickle +from PIL import Image + +from typing import List, Optional, Tuple, Union + +import torch + +from . import model_settings +from .stylegan3_official_network import StyleGAN3GeneratorModel +from .base_generator import BaseGenerator + +__all__ = ['StyleGANGenerator'] + +def make_transform(translate: Tuple[float,float], angle: float): + m = np.eye(3) + s = np.sin(angle/360.0*np.pi*2) + c = np.cos(angle/360.0*np.pi*2) + m[0][0] = c + m[0][1] = s + m[0][2] = translate[0] + m[1][0] = -s + m[1][1] = c + m[1][2] = translate[1] + return m + +class StyleGAN2Generator(BaseGenerator): + """Defines the generator class of StyleGAN. + + Different from conventional GAN, StyleGAN introduces a disentangled latent + space (i.e., W space) besides the normal latent space (i.e., Z space). Then, + the disentangled latent code, w, is fed into each convolutional layer to + modulate the `style` of the synthesis through AdaIN (Adaptive Instance + Normalization) layer. Normally, the w's fed into all layers are the same. But, + they can actually be different to make different layers get different styles. 
+ Accordingly, an extended space (i.e. W+ space) is used to gather all w's + together. Taking the official StyleGAN model trained on FF-HQ dataset as an + instance, there are + (1) Z space, with dimension (512,) + (2) W space, with dimension (512,) + (3) W+ space, with dimension (18, 512) + """ + + def __init__(self, model_name, logger=None): + self.truncation_psi = model_settings.STYLEGAN_TRUNCATION_PSI + self.truncation_layers = model_settings.STYLEGAN_TRUNCATION_LAYERS + self.randomize_noise = model_settings.STYLEGAN_RANDOMIZE_NOISE + self.model_specific_vars = ['truncation.truncation'] + super().__init__(model_name, logger) + self.num_layers = (int(np.log2(self.resolution)) - 1) * 2 + assert self.gan_type in ['stylegan3', 'stylegan2'] + + def build(self): + self.check_attr('w_space_dim') + self.check_attr('fused_scale') + self.model = StyleGAN3GeneratorModel( + img_resolution=self.resolution, + w_dim=self.w_space_dim, + z_dim=self.latent_space_dim, + c_dim=self.c_space_dim, + img_channels=3 + ) + + + def load(self): + self.logger.info(f'Loading pytorch model from `{self.model_path}`.') + with open(self.model_path, 'rb') as f: + self.model = pickle.load(f)['G_ema'] + self.logger.info(f'Successfully loaded!') + # self.lod = self.model.synthesis.lod.to(self.cpu_device).tolist() + # self.logger.info(f' `lod` of the loaded model is {self.lod}.') + + + def sample(self, num, latent_space_type='Z'): + """Samples latent codes randomly. + + Args: + num: Number of latent codes to sample. Should be positive. + latent_space_type: Type of latent space from which to sample latent code. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + + Returns: + A `numpy.ndarray` as sampled latend codes. + + Raises: + ValueError: If the given `latent_space_type` is not supported. + """ + latent_space_type = latent_space_type.upper() + if latent_space_type == 'Z': + latent_codes = np.random.randn(num, self.latent_space_dim) + elif latent_space_type == 'W': + latent_codes = np.random.randn(num, self.w_space_dim) + elif latent_space_type == 'WP': + latent_codes = np.random.randn(num, self.num_layers, self.w_space_dim) + else: + raise ValueError(f'Latent space type `{latent_space_type}` is invalid!') + + return latent_codes.astype(np.float32) + + def preprocess(self, latent_codes, latent_space_type='Z'): + """Preprocesses the input latent code if needed. + + Args: + latent_codes: The input latent codes for preprocessing. + latent_space_type: Type of latent space to which the latent codes belong. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + + Returns: + The preprocessed latent codes which can be used as final input for the + generator. + + Raises: + ValueError: If the given `latent_space_type` is not supported. 
+ """ + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + + latent_space_type = latent_space_type.upper() + if latent_space_type == 'Z': + latent_codes = latent_codes.reshape(-1, self.latent_space_dim) + norm = np.linalg.norm(latent_codes, axis=1, keepdims=True) + latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim) + elif latent_space_type == 'W': + latent_codes = latent_codes.reshape(-1, self.w_space_dim) + elif latent_space_type == 'WP': + latent_codes = latent_codes.reshape(-1, self.num_layers, self.w_space_dim) + else: + raise ValueError(f'Latent space type `{latent_space_type}` is invalid!') + + return latent_codes.astype(np.float32) + + def easy_sample(self, num, latent_space_type='Z'): + return self.sample(num, latent_space_type) + + def synthesize(self, + latent_codes, + latent_space_type='Z', + generate_style=False, + generate_image=True): + """Synthesizes images with given latent codes. + + One can choose whether to generate the layer-wise style codes. + + Args: + latent_codes: Input latent codes for image synthesis. + latent_space_type: Type of latent space to which the latent codes belong. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + generate_style: Whether to generate the layer-wise style codes. (default: + False) + generate_image: Whether to generate the final image synthesis. (default: + True) + + Returns: + A dictionary whose values are raw outputs from the generator. + """ + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + + results = {} + translate = (0,0) + rotate=0.0 + z = torch.from_numpy(latent_codes).to(self.run_device) + label = torch.zeros([1, self.c_space_dim]).to(self.run_device) + + if hasattr(self.model.synthesis, 'input'): + m = make_transform(translate, rotate) + m = np.linalg.inv(m) + self.model.synthesis.input.transform.copy_(torch.from_numpy(m)) + + ws = self.model.mapping(z, label) + #wps = self.model.truncation(w) + img = self.model(z, label) + img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.cpu().numpy() + + results['image'] = img + results['z'] = latent_codes + results['w'] = ws.detach().cpu().numpy() + #results['wp'] = wps.detach().cpu().numpy() + + return results diff --git a/models/stylegan3_generator.py b/models/stylegan3_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..a77e1f105dd356db0764b7c2629a1eee4a36d4b6 --- /dev/null +++ b/models/stylegan3_generator.py @@ -0,0 +1,189 @@ +# python3.7 +"""Contains the generator class of StyleGAN. + +Basically, this class is derived from the `BaseGenerator` class defined in +`base_generator.py`. +""" + +import os +import numpy as np +import pickle +from PIL import Image + +from typing import List, Optional, Tuple, Union + +import torch + +from . import model_settings +from .stylegan3_official_network import StyleGAN3GeneratorModel +from .base_generator import BaseGenerator + +__all__ = ['StyleGANGenerator'] + +def make_transform(translate: Tuple[float,float], angle: float): + m = np.eye(3) + s = np.sin(angle/360.0*np.pi*2) + c = np.cos(angle/360.0*np.pi*2) + m[0][0] = c + m[0][1] = s + m[0][2] = translate[0] + m[1][0] = -s + m[1][1] = c + m[1][2] = translate[1] + return m + +class StyleGAN3Generator(BaseGenerator): + """Defines the generator class of StyleGAN. 
+ + Different from conventional GAN, StyleGAN introduces a disentangled latent + space (i.e., W space) besides the normal latent space (i.e., Z space). Then, + the disentangled latent code, w, is fed into each convolutional layer to + modulate the `style` of the synthesis through AdaIN (Adaptive Instance + Normalization) layer. Normally, the w's fed into all layers are the same. But, + they can actually be different to make different layers get different styles. + Accordingly, an extended space (i.e. W+ space) is used to gather all w's + together. Taking the official StyleGAN model trained on FF-HQ dataset as an + instance, there are + (1) Z space, with dimension (512,) + (2) W space, with dimension (512,) + (3) W+ space, with dimension (18, 512) + """ + + def __init__(self, model_name, logger=None): + self.truncation_psi = model_settings.STYLEGAN_TRUNCATION_PSI + self.truncation_layers = model_settings.STYLEGAN_TRUNCATION_LAYERS + self.randomize_noise = model_settings.STYLEGAN_RANDOMIZE_NOISE + self.model_specific_vars = ['truncation.truncation'] + super().__init__(model_name, logger) + self.num_layers = (int(np.log2(self.resolution)) - 1) * 2 + assert self.gan_type in ['stylegan3', 'stylegan2'] + + def build(self): + self.check_attr('w_space_dim') + self.check_attr('fused_scale') + self.model = StyleGAN3GeneratorModel( + img_resolution=self.resolution, + w_dim=self.w_space_dim, + z_dim=self.latent_space_dim, + c_dim=self.c_space_dim, + img_channels=3 + ) + + + def load(self): + self.logger.info(f'Loading pytorch model from `{self.model_path}`.') + with open(self.model_path, 'rb') as f: + self.model = pickle.load(f)['G_ema'] + self.logger.info(f'Successfully loaded!') + # self.lod = self.model.synthesis.lod.to(self.cpu_device).tolist() + # self.logger.info(f' `lod` of the loaded model is {self.lod}.') + + + def sample(self, num, latent_space_type='Z'): + """Samples latent codes randomly. + + Args: + num: Number of latent codes to sample. Should be positive. + latent_space_type: Type of latent space from which to sample latent code. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + + Returns: + A `numpy.ndarray` as sampled latend codes. + + Raises: + ValueError: If the given `latent_space_type` is not supported. + """ + latent_space_type = latent_space_type.upper() + if latent_space_type == 'Z': + latent_codes = np.random.randn(num, self.latent_space_dim) + elif latent_space_type == 'W': + latent_codes = np.random.randn(num, self.w_space_dim) + elif latent_space_type == 'WP': + latent_codes = np.random.randn(num, self.num_layers, self.w_space_dim) + else: + raise ValueError(f'Latent space type `{latent_space_type}` is invalid!') + + return latent_codes.astype(np.float32) + + def preprocess(self, latent_codes, latent_space_type='Z'): + """Preprocesses the input latent code if needed. + + Args: + latent_codes: The input latent codes for preprocessing. + latent_space_type: Type of latent space to which the latent codes belong. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + + Returns: + The preprocessed latent codes which can be used as final input for the + generator. + + Raises: + ValueError: If the given `latent_space_type` is not supported. 
+ """ + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + + latent_space_type = latent_space_type.upper() + if latent_space_type == 'Z': + latent_codes = latent_codes.reshape(-1, self.latent_space_dim) + norm = np.linalg.norm(latent_codes, axis=1, keepdims=True) + latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim) + elif latent_space_type == 'W': + latent_codes = latent_codes.reshape(-1, self.w_space_dim) + elif latent_space_type == 'WP': + latent_codes = latent_codes.reshape(-1, self.num_layers, self.w_space_dim) + else: + raise ValueError(f'Latent space type `{latent_space_type}` is invalid!') + + return latent_codes.astype(np.float32) + + def easy_sample(self, num, latent_space_type='Z'): + return self.sample(num, latent_space_type) + + def synthesize(self, + latent_codes, + latent_space_type='Z', + generate_style=False, + generate_image=True): + """Synthesizes images with given latent codes. + + One can choose whether to generate the layer-wise style codes. + + Args: + latent_codes: Input latent codes for image synthesis. + latent_space_type: Type of latent space to which the latent codes belong. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + generate_style: Whether to generate the layer-wise style codes. (default: + False) + generate_image: Whether to generate the final image synthesis. (default: + True) + + Returns: + A dictionary whose values are raw outputs from the generator. + """ + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + + results = {} + translate = (0,0) + rotate=0.0 + z = torch.from_numpy(latent_codes).to(self.run_device) + label = torch.zeros([1, self.c_space_dim]).to(self.run_device) + + if hasattr(self.model.synthesis, 'input'): + m = make_transform(translate, rotate) + m = np.linalg.inv(m) + self.model.synthesis.input.transform.copy_(torch.from_numpy(m)) + + ws = self.model.mapping(z, label) + #wps = self.model.truncation(w) + img = self.model(z, label) + img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.cpu().numpy() + + results['image'] = img + results['z'] = latent_codes + results['w'] = ws.detach().cpu().numpy() + #results['wp'] = wps.detach().cpu().numpy() + + return results diff --git a/models/stylegan3_official_network.py b/models/stylegan3_official_network.py new file mode 100644 index 0000000000000000000000000000000000000000..00054b48b0d3f1f693da4697d2437f8309fb0855 --- /dev/null +++ b/models/stylegan3_official_network.py @@ -0,0 +1,513 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ +"""Generator architecture from the paper +"Alias-Free Generative Adversarial Networks".""" + +import numpy as np +import scipy.signal +import scipy.optimize +import torch +from torch_utils import misc +from torch_utils import persistence +from torch_utils.ops import conv2d_gradfix +from torch_utils.ops import filtered_lrelu +from torch_utils.ops import bias_act + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def modulated_conv2d( + x, # Input tensor: [batch_size, in_channels, in_height, in_width] + w, # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width] + s, # Style tensor: [batch_size, in_channels] + demodulate = True, # Apply weight demodulation? + padding = 0, # Padding: int or [padH, padW] + input_gain = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels] +): + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + batch_size = int(x.shape[0]) + out_channels, in_channels, kh, kw = w.shape + misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk] + misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] + misc.assert_shape(s, [batch_size, in_channels]) # [NI] + + # Pre-normalize inputs. + if demodulate: + w = w * w.square().mean([1,2,3], keepdim=True).rsqrt() + s = s * s.square().mean().rsqrt() + + # Modulate weights. + w = w.unsqueeze(0) # [NOIkk] + w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Demodulate weights. + if demodulate: + dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO] + w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Apply input scaling. + if input_gain is not None: + input_gain = input_gain.expand(batch_size, in_channels) # [NI] + w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Execute as one fused op using grouped convolution. + x = x.reshape(1, -1, *x.shape[2:]) + w = w.reshape(-1, in_channels, kh, kw) + x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size) + x = x.reshape(batch_size, -1, *x.shape[2:]) + return x + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class FullyConnectedLayer(torch.nn.Module): + def __init__(self, + in_features, # Number of input features. + out_features, # Number of output features. + activation = 'linear', # Activation function: 'relu', 'lrelu', etc. + bias = True, # Apply additive bias before the activation function? + lr_multiplier = 1, # Learning rate multiplier. + weight_init = 1, # Initial standard deviation of the weight tensor. + bias_init = 0, # Initial value of the additive bias. 
+ ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.activation = activation + self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) * (weight_init / lr_multiplier)) + bias_init = np.broadcast_to(np.asarray(bias_init, dtype=np.float32), [out_features]) + self.bias = torch.nn.Parameter(torch.from_numpy(bias_init / lr_multiplier)) if bias else None + self.weight_gain = lr_multiplier / np.sqrt(in_features) + self.bias_gain = lr_multiplier + + def forward(self, x): + w = self.weight.to(x.dtype) * self.weight_gain + b = self.bias + if b is not None: + b = b.to(x.dtype) + if self.bias_gain != 1: + b = b * self.bias_gain + if self.activation == 'linear' and b is not None: + x = torch.addmm(b.unsqueeze(0), x, w.t()) + else: + x = x.matmul(w.t()) + x = bias_act.bias_act(x, b, act=self.activation) + return x + + def extra_repr(self): + return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class MappingNetwork(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality, 0 = no labels. + w_dim, # Intermediate latent (W) dimensionality. + num_ws, # Number of intermediate latents to output. + num_layers = 2, # Number of mapping layers. + lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers. + w_avg_beta = 0.998, # Decay for tracking the moving average of W during training. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.num_ws = num_ws + self.num_layers = num_layers + self.w_avg_beta = w_avg_beta + + # Construct layers. + self.embed = FullyConnectedLayer(self.c_dim, self.w_dim) if self.c_dim > 0 else None + features = [self.z_dim + (self.w_dim if self.c_dim > 0 else 0)] + [self.w_dim] * self.num_layers + for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]): + layer = FullyConnectedLayer(in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier) + setattr(self, f'fc{idx}', layer) + self.register_buffer('w_avg', torch.zeros([w_dim])) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): + misc.assert_shape(z, [None, self.z_dim]) + if truncation_cutoff is None: + truncation_cutoff = self.num_ws + + # Embed, normalize, and concatenate inputs. + x = z.to(torch.float32) + x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt() + if self.c_dim > 0: + misc.assert_shape(c, [None, self.c_dim]) + y = self.embed(c.to(torch.float32)) + y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt() + x = torch.cat([x, y], dim=1) if x is not None else y + + # Execute layers. + for idx in range(self.num_layers): + x = getattr(self, f'fc{idx}')(x) + + # Update moving average of W. + if update_emas: + self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) + + # Broadcast and apply truncation. 
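For reference, the core of `modulated_conv2d()` defined at the top of this file is: scale each input channel of the shared weight by the per-sample style, then rescale each output filter to unit L2 norm (demodulation). A compact sketch of just the coefficient math, with toy tensor sizes and without the grouped convolution:

```python
import torch

N, O, I, k = 2, 4, 3, 3
w = torch.randn(O, I, k, k)                                      # shared conv weight
s = torch.randn(N, I)                                            # per-sample styles
w = w.unsqueeze(0) * s.unsqueeze(1).unsqueeze(3).unsqueeze(4)    # modulate   [NOIkk]
d = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()               # demod coefficients [NO]
w = w * d.unsqueeze(2).unsqueeze(3).unsqueeze(4)                 # demodulate
print(w.shape, float(w.flatten(2).norm(dim=2)[0, 0]))            # torch.Size([2, 4, 3, 3]) ~1.0
```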
+ x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) + if truncation_psi != 1: + x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) + return x + + def extra_repr(self): + return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisInput(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + channels, # Number of output channels. + size, # Output spatial size: int or [width, height]. + sampling_rate, # Output sampling rate. + bandwidth, # Output bandwidth. + ): + super().__init__() + self.w_dim = w_dim + self.channels = channels + self.size = np.broadcast_to(np.asarray(size), [2]) + self.sampling_rate = sampling_rate + self.bandwidth = bandwidth + + # Draw random frequencies from uniform 2D disc. + freqs = torch.randn([self.channels, 2]) + radii = freqs.square().sum(dim=1, keepdim=True).sqrt() + freqs /= radii * radii.square().exp().pow(0.25) + freqs *= bandwidth + phases = torch.rand([self.channels]) - 0.5 + + # Setup parameters and buffers. + self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels])) + self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0]) + self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image. + self.register_buffer('freqs', freqs) + self.register_buffer('phases', phases) + + def forward(self, w): + # Introduce batch dimension. + transforms = self.transform.unsqueeze(0) # [batch, row, col] + freqs = self.freqs.unsqueeze(0) # [batch, channel, xy] + phases = self.phases.unsqueeze(0) # [batch, channel] + + # Apply learned transformation. + t = self.affine(w) # t = (r_c, r_s, t_x, t_y) + t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y) + m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image. + m_r[:, 0, 0] = t[:, 0] # r'_c + m_r[:, 0, 1] = -t[:, 1] # r'_s + m_r[:, 1, 0] = t[:, 1] # r'_s + m_r[:, 1, 1] = t[:, 0] # r'_c + m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image. + m_t[:, 0, 2] = -t[:, 2] # t'_x + m_t[:, 1, 2] = -t[:, 3] # t'_y + transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform. + + # Transform frequencies. + phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2) + freqs = freqs @ transforms[:, :2, :2] + + # Dampen out-of-band frequencies that may occur due to the user-specified transform. + amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1) + + # Construct sampling grid. + theta = torch.eye(2, 3, device=w.device) + theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate + theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate + grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False) + + # Compute Fourier features. + x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel] + x = x + phases.unsqueeze(1).unsqueeze(2) + x = torch.sin(x * (np.pi * 2)) + x = x * amplitudes.unsqueeze(1).unsqueeze(2) + + # Apply trainable mapping. + weight = self.weight / np.sqrt(self.channels) + x = x @ weight.t() + + # Ensure correct shape. 
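The truncation step at the end of `MappingNetwork.forward()` above pulls the first `truncation_cutoff` per-layer codes toward the tracked average `w_avg` with strength `truncation_psi`; layers past the cutoff keep the untruncated code. A small PyTorch sketch of only that step, with toy dimensions:

```python
import torch

num_ws, w_dim, psi, cutoff = 16, 512, 0.7, 8
w_avg = torch.zeros(w_dim)                       # tracked moving average of W
x = torch.randn(2, w_dim)                        # one mapped latent per sample
x = x.unsqueeze(1).repeat([1, num_ws, 1])        # broadcast to every layer
x[:, :cutoff] = w_avg.lerp(x[:, :cutoff], psi)   # w_avg + psi * (x - w_avg)
print(x.shape)                                   # torch.Size([2, 16, 512])
```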
+ x = x.permute(0, 3, 1, 2) # [batch, channel, height, width] + misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])]) + return x + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},', + f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisLayer(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + is_torgb, # Is this the final ToRGB layer? + is_critically_sampled, # Does this layer use critical sampling? + use_fp16, # Does this layer use FP16? + + # Input & output specifications. + in_channels, # Number of input channels. + out_channels, # Number of output channels. + in_size, # Input spatial size: int or [width, height]. + out_size, # Output spatial size: int or [width, height]. + in_sampling_rate, # Input sampling rate (s). + out_sampling_rate, # Output sampling rate (s). + in_cutoff, # Input cutoff frequency (f_c). + out_cutoff, # Output cutoff frequency (f_c). + in_half_width, # Input transition band half-width (f_h). + out_half_width, # Output Transition band half-width (f_h). + + # Hyperparameters. + conv_kernel = 3, # Convolution kernel size. Ignored for final the ToRGB layer. + filter_size = 6, # Low-pass filter size relative to the lower resolution when up/downsampling. + lrelu_upsampling = 2, # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer. + use_radial_filters = False, # Use radially symmetric downsampling filter? Ignored for critically sampled layers. + conv_clamp = 256, # Clamp the output to [-X, +X], None = disable clamping. + magnitude_ema_beta = 0.999, # Decay rate for the moving average of input magnitudes. + ): + super().__init__() + self.w_dim = w_dim + self.is_torgb = is_torgb + self.is_critically_sampled = is_critically_sampled + self.use_fp16 = use_fp16 + self.in_channels = in_channels + self.out_channels = out_channels + self.in_size = np.broadcast_to(np.asarray(in_size), [2]) + self.out_size = np.broadcast_to(np.asarray(out_size), [2]) + self.in_sampling_rate = in_sampling_rate + self.out_sampling_rate = out_sampling_rate + self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling) + self.in_cutoff = in_cutoff + self.out_cutoff = out_cutoff + self.in_half_width = in_half_width + self.out_half_width = out_half_width + self.conv_kernel = 1 if is_torgb else conv_kernel + self.conv_clamp = conv_clamp + self.magnitude_ema_beta = magnitude_ema_beta + + # Setup parameters and buffers. + self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1) + self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel])) + self.bias = torch.nn.Parameter(torch.zeros([self.out_channels])) + self.register_buffer('magnitude_ema', torch.ones([])) + + # Design upsampling filter. + self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate)) + assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate + self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1 + self.register_buffer('up_filter', self.design_lowpass_filter( + numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate)) + + # Design downsampling filter. 
+ self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate)) + assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate + self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1 + self.down_radial = use_radial_filters and not self.is_critically_sampled + self.register_buffer('down_filter', self.design_lowpass_filter( + numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial)) + + # Compute padding. + pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling. + pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling. + pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters. + pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3). + pad_hi = pad_total - pad_lo + self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])] + + def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False): + assert noise_mode in ['random', 'const', 'none'] # unused + misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])]) + misc.assert_shape(w, [x.shape[0], self.w_dim]) + + # Track input magnitude. + if update_emas: + with torch.autograd.profiler.record_function('update_magnitude_ema'): + magnitude_cur = x.detach().to(torch.float32).square().mean() + self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta)) + input_gain = self.magnitude_ema.rsqrt() + + # Execute affine layer. + styles = self.affine(w) + if self.is_torgb: + weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2)) + styles = styles * weight_gain + + # Execute modulated conv2d. + dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32 + x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles, + padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain) + + # Execute bias, filtered leaky ReLU, and clamping. + gain = 1 if self.is_torgb else np.sqrt(2) + slope = 1 if self.is_torgb else 0.2 + x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype), + up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp) + + # Ensure correct shape and dtype. + misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])]) + assert x.dtype == dtype + return x + + @staticmethod + def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False): + assert numtaps >= 1 + + # Identity filter. + if numtaps == 1: + return None + + # Separable Kaiser low-pass filter. + if not radial: + f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs) + return torch.as_tensor(f, dtype=torch.float32) + + # Radially symmetric jinc-based filter. 
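+        # The ideal circularly symmetric low-pass response is a jinc (first-order Bessel) kernel;
+        # it is tapered with a separable Kaiser window whose beta matches the requested transition band,
+        # then normalized to unit DC gain.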
+ x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs + r = np.hypot(*np.meshgrid(x, x)) + f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r) + beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2))) + w = np.kaiser(numtaps, beta) + f *= np.outer(w, w) + f /= np.sum(f) + return torch.as_tensor(f, dtype=torch.float32) + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},', + f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},', + f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},', + f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},', + f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},', + f'in_size={list(self.in_size)}, out_size={list(self.out_size)},', + f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisNetwork(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base = 32768, # Overall multiplier for the number of channels. + channel_max = 512, # Maximum number of channels in any layer. + num_layers = 14, # Total number of layers, excluding Fourier features and ToRGB. + num_critical = 2, # Number of critically sampled layers at the end. + first_cutoff = 2, # Cutoff frequency of the first layer (f_{c,0}). + first_stopband = 2**2.1, # Minimum stopband of the first layer (f_{t,0}). + last_stopband_rel = 2**0.3, # Minimum stopband of the last layer, expressed relative to the cutoff. + margin_size = 10, # Number of additional pixels outside the image. + output_scale = 0.25, # Scale factor for the output image. + num_fp16_res = 4, # Use FP16 for the N highest resolutions. + **layer_kwargs, # Arguments for SynthesisLayer. + ): + super().__init__() + self.w_dim = w_dim + self.num_ws = num_layers + 2 + self.img_resolution = img_resolution + self.img_channels = img_channels + self.num_layers = num_layers + self.num_critical = num_critical + self.margin_size = margin_size + self.output_scale = output_scale + self.num_fp16_res = num_fp16_res + + # Geometric progression of layer cutoffs and min. stopbands. + last_cutoff = self.img_resolution / 2 # f_{c,N} + last_stopband = last_cutoff * last_stopband_rel # f_{t,N} + exponents = np.minimum(np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1) + cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents # f_c[i] + stopbands = first_stopband * (last_stopband / first_stopband) ** exponents # f_t[i] + + # Compute remaining layer parameters. + sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i] + half_widths = np.maximum(stopbands, sampling_rates / 2) - cutoffs # f_h[i] + sizes = sampling_rates + self.margin_size * 2 + sizes[-2:] = self.img_resolution + channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max)) + channels[-1] = self.img_channels + + # Construct layers. 
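+        # Layer 0 is the Fourier-feature input defined below; it is followed by num_layers + 1 SynthesisLayers
+        # whose cutoffs and sampling rates follow the geometric schedule computed above, the last acting as ToRGB.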
+ self.input = SynthesisInput( + w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]), + sampling_rate=sampling_rates[0], bandwidth=cutoffs[0]) + self.layer_names = [] + for idx in range(self.num_layers + 1): + prev = max(idx - 1, 0) + is_torgb = (idx == self.num_layers) + is_critically_sampled = (idx >= self.num_layers - self.num_critical) + use_fp16 = (sampling_rates[idx] * (2 ** self.num_fp16_res) > self.img_resolution) + layer = SynthesisLayer( + w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16, + in_channels=int(channels[prev]), out_channels= int(channels[idx]), + in_size=int(sizes[prev]), out_size=int(sizes[idx]), + in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]), + in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx], + in_half_width=half_widths[prev], out_half_width=half_widths[idx], + **layer_kwargs) + name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}' + setattr(self, name, layer) + self.layer_names.append(name) + + def forward(self, ws, **layer_kwargs): + misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) + ws = ws.to(torch.float32).unbind(dim=1) + + # Execute layers. + x = self.input(ws[0]) + for name, w in zip(self.layer_names, ws[1:]): + x = getattr(self, name)(x, w, **layer_kwargs) + if self.output_scale != 1: + x = x * self.output_scale + + # Ensure correct shape and dtype. + misc.assert_shape(x, [None, self.img_channels, self.img_resolution, self.img_resolution]) + x = x.to(torch.float32) + return x + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', + f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', + f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},', + f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class StyleGAN3GeneratorModel(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + mapping_kwargs = {}, # Arguments for MappingNetwork. + **synthesis_kwargs, # Arguments for SynthesisNetwork. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + return img diff --git a/models/stylegan_generator.py b/models/stylegan_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..8296946f9825d8d389afea97dfe5c7c926f425e9 --- /dev/null +++ b/models/stylegan_generator.py @@ -0,0 +1,283 @@ +# python3.7 +"""Contains the generator class of StyleGAN. 
+ +Basically, this class is derived from the `BaseGenerator` class defined in +`base_generator.py`. +""" + +import os +import numpy as np + +import torch + +from . import model_settings +from .stylegan_generator_model import StyleGANGeneratorModel +from .base_generator import BaseGenerator + +__all__ = ['StyleGANGenerator'] + + +class StyleGANGenerator(BaseGenerator): + """Defines the generator class of StyleGAN. + + Different from conventional GAN, StyleGAN introduces a disentangled latent + space (i.e., W space) besides the normal latent space (i.e., Z space). Then, + the disentangled latent code, w, is fed into each convolutional layer to + modulate the `style` of the synthesis through AdaIN (Adaptive Instance + Normalization) layer. Normally, the w's fed into all layers are the same. But, + they can actually be different to make different layers get different styles. + Accordingly, an extended space (i.e. W+ space) is used to gather all w's + together. Taking the official StyleGAN model trained on FF-HQ dataset as an + instance, there are + (1) Z space, with dimension (512,) + (2) W space, with dimension (512,) + (3) W+ space, with dimension (18, 512) + """ + + def __init__(self, model_name, logger=None): + self.truncation_psi = model_settings.STYLEGAN_TRUNCATION_PSI + self.truncation_layers = model_settings.STYLEGAN_TRUNCATION_LAYERS + self.randomize_noise = model_settings.STYLEGAN_RANDOMIZE_NOISE + self.model_specific_vars = ['truncation.truncation'] + super().__init__(model_name, logger) + self.num_layers = (int(np.log2(self.resolution)) - 1) * 2 + assert self.gan_type == 'stylegan' + + def build(self): + self.check_attr('w_space_dim') + self.check_attr('fused_scale') + self.model = StyleGANGeneratorModel( + resolution=self.resolution, + w_space_dim=self.w_space_dim, + fused_scale=self.fused_scale, + output_channels=self.output_channels, + truncation_psi=self.truncation_psi, + truncation_layers=self.truncation_layers, + randomize_noise=self.randomize_noise) + + def load(self): + self.logger.info(f'Loading pytorch model from `{self.model_path}`.') + state_dict = torch.load(self.model_path) + for var_name in self.model_specific_vars: + state_dict[var_name] = self.model.state_dict()[var_name] + self.model.load_state_dict(state_dict) + self.logger.info(f'Successfully loaded!') + self.lod = self.model.synthesis.lod.to(self.cpu_device).tolist() + self.logger.info(f' `lod` of the loaded model is {self.lod}.') + + def convert_tf_model(self, test_num=10): + import sys + import pickle + import tensorflow as tf + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + sys.path.append(model_settings.BASE_DIR + '/stylegan_tf_official') + + self.logger.info(f'Loading tensorflow model from `{self.tf_model_path}`.') + tf.InteractiveSession() + with open(self.tf_model_path, 'rb') as f: + _, _, tf_model = pickle.load(f) + self.logger.info(f'Successfully loaded!') + + self.logger.info(f'Converting tensorflow model to pytorch version.') + tf_vars = dict(tf_model.__getstate__()['variables']) + tf_vars.update( + dict(tf_model.components.mapping.__getstate__()['variables'])) + tf_vars.update( + dict(tf_model.components.synthesis.__getstate__()['variables'])) + state_dict = self.model.state_dict() + for pth_var_name, tf_var_name in self.model.pth_to_tf_var_mapping.items(): + if 'ToRGB_lod' in tf_var_name: + lod = int(tf_var_name[len('ToRGB_lod')]) + lod_shift = 10 - int(np.log2(self.resolution)) + tf_var_name = tf_var_name.replace(f'{lod}', f'{lod - lod_shift}') + if tf_var_name not in tf_vars: + 
self.logger.debug(f'Variable `{tf_var_name}` does not exist in ' + f'tensorflow model.') + continue + self.logger.debug(f' Converting `{tf_var_name}` to `{pth_var_name}`.') + var = torch.from_numpy(np.array(tf_vars[tf_var_name])) + if 'weight' in pth_var_name: + if 'dense' in pth_var_name: + var = var.permute(1, 0) + elif 'conv' in pth_var_name: + var = var.permute(3, 2, 0, 1) + state_dict[pth_var_name] = var + self.logger.info(f'Successfully converted!') + + self.logger.info(f'Saving pytorch model to `{self.model_path}`.') + for var_name in self.model_specific_vars: + del state_dict[var_name] + torch.save(state_dict, self.model_path) + self.logger.info(f'Successfully saved!') + + self.load() + + # Official tensorflow model can only run on GPU. + if test_num <= 0 or not tf.test.is_built_with_cuda(): + return + self.logger.info(f'Testing conversion results.') + self.model.eval().to(self.run_device) + total_distance = 0.0 + for i in range(test_num): + latent_code = self.easy_sample(1) + tf_output = tf_model.run(latent_code, # latents_in + None, # labels_in + truncation_psi=self.truncation_psi, + truncation_cutoff=self.truncation_layers, + randomize_noise=self.randomize_noise) + pth_output = self.synthesize(latent_code)['image'] + distance = np.average(np.abs(tf_output - pth_output)) + self.logger.debug(f' Test {i:03d}: distance {distance:.6e}.') + total_distance += distance + self.logger.info(f'Average distance is {total_distance / test_num:.6e}.') + + def sample(self, num, latent_space_type='Z'): + """Samples latent codes randomly. + + Args: + num: Number of latent codes to sample. Should be positive. + latent_space_type: Type of latent space from which to sample latent code. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + + Returns: + A `numpy.ndarray` as sampled latend codes. + + Raises: + ValueError: If the given `latent_space_type` is not supported. + """ + latent_space_type = latent_space_type.upper() + if latent_space_type == 'Z': + latent_codes = np.random.randn(num, self.latent_space_dim) + elif latent_space_type == 'W': + latent_codes = np.random.randn(num, self.w_space_dim) + elif latent_space_type == 'WP': + latent_codes = np.random.randn(num, self.num_layers, self.w_space_dim) + else: + raise ValueError(f'Latent space type `{latent_space_type}` is invalid!') + + return latent_codes.astype(np.float32) + + def preprocess(self, latent_codes, latent_space_type='Z'): + """Preprocesses the input latent code if needed. + + Args: + latent_codes: The input latent codes for preprocessing. + latent_space_type: Type of latent space to which the latent codes belong. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + + Returns: + The preprocessed latent codes which can be used as final input for the + generator. + + Raises: + ValueError: If the given `latent_space_type` is not supported. 
+ """ + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + + latent_space_type = latent_space_type.upper() + if latent_space_type == 'Z': + latent_codes = latent_codes.reshape(-1, self.latent_space_dim) + norm = np.linalg.norm(latent_codes, axis=1, keepdims=True) + latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim) + elif latent_space_type == 'W': + latent_codes = latent_codes.reshape(-1, self.w_space_dim) + elif latent_space_type == 'WP': + latent_codes = latent_codes.reshape(-1, self.num_layers, self.w_space_dim) + else: + raise ValueError(f'Latent space type `{latent_space_type}` is invalid!') + + return latent_codes.astype(np.float32) + + def easy_sample(self, num, latent_space_type='Z'): + return self.preprocess(self.sample(num, latent_space_type), + latent_space_type) + + def synthesize(self, + latent_codes, + latent_space_type='Z', + generate_style=False, + generate_image=True): + """Synthesizes images with given latent codes. + + One can choose whether to generate the layer-wise style codes. + + Args: + latent_codes: Input latent codes for image synthesis. + latent_space_type: Type of latent space to which the latent codes belong. + Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`) + generate_style: Whether to generate the layer-wise style codes. (default: + False) + generate_image: Whether to generate the final image synthesis. (default: + True) + + Returns: + A dictionary whose values are raw outputs from the generator. + """ + if not isinstance(latent_codes, np.ndarray): + raise ValueError(f'Latent codes should be with type `numpy.ndarray`!') + + results = {} + + latent_space_type = latent_space_type.upper() + latent_codes_shape = latent_codes.shape + # Generate from Z space. + if latent_space_type == 'Z': + if not (len(latent_codes_shape) == 2 and + latent_codes_shape[0] <= self.batch_size and + latent_codes_shape[1] == self.latent_space_dim): + raise ValueError(f'Latent_codes should be with shape [batch_size, ' + f'latent_space_dim], where `batch_size` no larger ' + f'than {self.batch_size}, and `latent_space_dim` ' + f'equal to {self.latent_space_dim}!\n' + f'But {latent_codes_shape} received!') + zs = torch.from_numpy(latent_codes).type(torch.FloatTensor) + zs = zs.to(self.run_device) + ws = self.model.mapping(zs) + wps = self.model.truncation(ws) + results['z'] = latent_codes + results['w'] = self.get_value(ws) + results['wp'] = self.get_value(wps) + # Generate from W space. + elif latent_space_type == 'W': + if not (len(latent_codes_shape) == 2 and + latent_codes_shape[0] <= self.batch_size and + latent_codes_shape[1] == self.w_space_dim): + raise ValueError(f'Latent_codes should be with shape [batch_size, ' + f'w_space_dim], where `batch_size` no larger than ' + f'{self.batch_size}, and `w_space_dim` equal to ' + f'{self.w_space_dim}!\n' + f'But {latent_codes_shape} received!') + ws = torch.from_numpy(latent_codes).type(torch.FloatTensor) + ws = ws.to(self.run_device) + wps = self.model.truncation(ws) + results['w'] = latent_codes + results['wp'] = self.get_value(wps) + # Generate from W+ space. 
+ elif latent_space_type == 'WP': + if not (len(latent_codes_shape) == 3 and + latent_codes_shape[0] <= self.batch_size and + latent_codes_shape[1] == self.num_layers and + latent_codes_shape[2] == self.w_space_dim): + raise ValueError(f'Latent_codes should be with shape [batch_size, ' + f'num_layers, w_space_dim], where `batch_size` no ' + f'larger than {self.batch_size}, `num_layers` equal ' + f'to {self.num_layers}, and `w_space_dim` equal to ' + f'{self.w_space_dim}!\n' + f'But {latent_codes_shape} received!') + wps = torch.from_numpy(latent_codes).type(torch.FloatTensor) + wps = wps.to(self.run_device) + results['wp'] = latent_codes + else: + raise ValueError(f'Latent space type `{latent_space_type}` is invalid!') + + if generate_style: + for i in range(self.num_layers): + style = self.model.synthesis.__getattr__( + f'layer{i}').epilogue.style_mod.dense(wps[:, i, :]) + results[f'style{i:02d}'] = self.get_value(style) + + if generate_image: + images = self.model.synthesis(wps) + results['image'] = self.get_value(images) + return results diff --git a/models/stylegan_generator_model.py b/models/stylegan_generator_model.py new file mode 100644 index 0000000000000000000000000000000000000000..718ffbe70f7e4b17d12620266f5f334222535adc --- /dev/null +++ b/models/stylegan_generator_model.py @@ -0,0 +1,815 @@ +# python3.7 +"""Contains the implementation of generator described in StyleGAN. + +Different from the official tensorflow model in folder `stylegan_tf_official`, +this is a simple pytorch version which only contains the generator part. This +class is specially used for inference. + +For more details, please check the original paper: +https://arxiv.org/pdf/1812.04948.pdf +""" + +from collections import OrderedDict +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['StyleGANGeneratorModel'] + +# Defines a dictionary, which maps the target resolution of the final generated +# image to numbers of filters used in each convolutional layer in sequence. +_RESOLUTIONS_TO_CHANNELS = { + 8: [512, 512, 512], + 16: [512, 512, 512, 512], + 32: [512, 512, 512, 512, 512], + 64: [512, 512, 512, 512, 512, 256], + 128: [512, 512, 512, 512, 512, 256, 128], + 256: [512, 512, 512, 512, 512, 256, 128, 64], + 512: [512, 512, 512, 512, 512, 256, 128, 64, 32], + 1024: [512, 512, 512, 512, 512, 256, 128, 64, 32, 16], +} + +# pylint: disable=line-too-long +# Variable mapping from pytorch model to official tensorflow model. +_STYLEGAN_PTH_VARS_TO_TF_VARS = { + # Statistic information of disentangled latent feature, w. + 'truncation.w_avg':'dlatent_avg', # [512] + + # Noises. 
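+    # One fixed noise buffer per synthesis layer; the spatial size doubles every two layers
+    # because each resolution uses two convolutional layers.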
+ 'synthesis.layer0.epilogue.apply_noise.noise': 'noise0', # [1, 1, 4, 4] + 'synthesis.layer1.epilogue.apply_noise.noise': 'noise1', # [1, 1, 4, 4] + 'synthesis.layer2.epilogue.apply_noise.noise': 'noise2', # [1, 1, 8, 8] + 'synthesis.layer3.epilogue.apply_noise.noise': 'noise3', # [1, 1, 8, 8] + 'synthesis.layer4.epilogue.apply_noise.noise': 'noise4', # [1, 1, 16, 16] + 'synthesis.layer5.epilogue.apply_noise.noise': 'noise5', # [1, 1, 16, 16] + 'synthesis.layer6.epilogue.apply_noise.noise': 'noise6', # [1, 1, 32, 32] + 'synthesis.layer7.epilogue.apply_noise.noise': 'noise7', # [1, 1, 32, 32] + 'synthesis.layer8.epilogue.apply_noise.noise': 'noise8', # [1, 1, 64, 64] + 'synthesis.layer9.epilogue.apply_noise.noise': 'noise9', # [1, 1, 64, 64] + 'synthesis.layer10.epilogue.apply_noise.noise': 'noise10', # [1, 1, 128, 128] + 'synthesis.layer11.epilogue.apply_noise.noise': 'noise11', # [1, 1, 128, 128] + 'synthesis.layer12.epilogue.apply_noise.noise': 'noise12', # [1, 1, 256, 256] + 'synthesis.layer13.epilogue.apply_noise.noise': 'noise13', # [1, 1, 256, 256] + 'synthesis.layer14.epilogue.apply_noise.noise': 'noise14', # [1, 1, 512, 512] + 'synthesis.layer15.epilogue.apply_noise.noise': 'noise15', # [1, 1, 512, 512] + 'synthesis.layer16.epilogue.apply_noise.noise': 'noise16', # [1, 1, 1024, 1024] + 'synthesis.layer17.epilogue.apply_noise.noise': 'noise17', # [1, 1, 1024, 1024] + + # Mapping blocks. + 'mapping.dense0.linear.weight': 'Dense0/weight', # [512, 512] + 'mapping.dense0.wscale.bias': 'Dense0/bias', # [512] + 'mapping.dense1.linear.weight': 'Dense1/weight', # [512, 512] + 'mapping.dense1.wscale.bias': 'Dense1/bias', # [512] + 'mapping.dense2.linear.weight': 'Dense2/weight', # [512, 512] + 'mapping.dense2.wscale.bias': 'Dense2/bias', # [512] + 'mapping.dense3.linear.weight': 'Dense3/weight', # [512, 512] + 'mapping.dense3.wscale.bias': 'Dense3/bias', # [512] + 'mapping.dense4.linear.weight': 'Dense4/weight', # [512, 512] + 'mapping.dense4.wscale.bias': 'Dense4/bias', # [512] + 'mapping.dense5.linear.weight': 'Dense5/weight', # [512, 512] + 'mapping.dense5.wscale.bias': 'Dense5/bias', # [512] + 'mapping.dense6.linear.weight': 'Dense6/weight', # [512, 512] + 'mapping.dense6.wscale.bias': 'Dense6/bias', # [512] + 'mapping.dense7.linear.weight': 'Dense7/weight', # [512, 512] + 'mapping.dense7.wscale.bias': 'Dense7/bias', # [512] + + # Synthesis blocks. 
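+    # Each StyleMod dense layer maps the 512-d w code to a per-channel scale and bias,
+    # hence the [2 * channels, 512] weight shapes noted below.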
+ 'synthesis.lod': 'lod', # [] + 'synthesis.layer0.first_layer': '4x4/Const/const', # [1, 512, 4, 4] + 'synthesis.layer0.epilogue.apply_noise.weight': '4x4/Const/Noise/weight', # [512] + 'synthesis.layer0.epilogue.bias': '4x4/Const/bias', # [512] + 'synthesis.layer0.epilogue.style_mod.dense.linear.weight': '4x4/Const/StyleMod/weight', # [1024, 512] + 'synthesis.layer0.epilogue.style_mod.dense.wscale.bias': '4x4/Const/StyleMod/bias', # [1024] + 'synthesis.layer1.conv.weight': '4x4/Conv/weight', # [512, 512, 3, 3] + 'synthesis.layer1.epilogue.apply_noise.weight': '4x4/Conv/Noise/weight', # [512] + 'synthesis.layer1.epilogue.bias': '4x4/Conv/bias', # [512] + 'synthesis.layer1.epilogue.style_mod.dense.linear.weight': '4x4/Conv/StyleMod/weight', # [1024, 512] + 'synthesis.layer1.epilogue.style_mod.dense.wscale.bias': '4x4/Conv/StyleMod/bias', # [1024] + 'synthesis.layer2.conv.weight': '8x8/Conv0_up/weight', # [512, 512, 3, 3] + 'synthesis.layer2.epilogue.apply_noise.weight': '8x8/Conv0_up/Noise/weight', # [512] + 'synthesis.layer2.epilogue.bias': '8x8/Conv0_up/bias', # [512] + 'synthesis.layer2.epilogue.style_mod.dense.linear.weight': '8x8/Conv0_up/StyleMod/weight', # [1024, 512] + 'synthesis.layer2.epilogue.style_mod.dense.wscale.bias': '8x8/Conv0_up/StyleMod/bias', # [1024] + 'synthesis.layer3.conv.weight': '8x8/Conv1/weight', # [512, 512, 3, 3] + 'synthesis.layer3.epilogue.apply_noise.weight': '8x8/Conv1/Noise/weight', # [512] + 'synthesis.layer3.epilogue.bias': '8x8/Conv1/bias', # [512] + 'synthesis.layer3.epilogue.style_mod.dense.linear.weight': '8x8/Conv1/StyleMod/weight', # [1024, 512] + 'synthesis.layer3.epilogue.style_mod.dense.wscale.bias': '8x8/Conv1/StyleMod/bias', # [1024] + 'synthesis.layer4.conv.weight': '16x16/Conv0_up/weight', # [512, 512, 3, 3] + 'synthesis.layer4.epilogue.apply_noise.weight': '16x16/Conv0_up/Noise/weight', # [512] + 'synthesis.layer4.epilogue.bias': '16x16/Conv0_up/bias', # [512] + 'synthesis.layer4.epilogue.style_mod.dense.linear.weight': '16x16/Conv0_up/StyleMod/weight', # [1024, 512] + 'synthesis.layer4.epilogue.style_mod.dense.wscale.bias': '16x16/Conv0_up/StyleMod/bias', # [1024] + 'synthesis.layer5.conv.weight': '16x16/Conv1/weight', # [512, 512, 3, 3] + 'synthesis.layer5.epilogue.apply_noise.weight': '16x16/Conv1/Noise/weight', # [512] + 'synthesis.layer5.epilogue.bias': '16x16/Conv1/bias', # [512] + 'synthesis.layer5.epilogue.style_mod.dense.linear.weight': '16x16/Conv1/StyleMod/weight', # [1024, 512] + 'synthesis.layer5.epilogue.style_mod.dense.wscale.bias': '16x16/Conv1/StyleMod/bias', # [1024] + 'synthesis.layer6.conv.weight': '32x32/Conv0_up/weight', # [512, 512, 3, 3] + 'synthesis.layer6.epilogue.apply_noise.weight': '32x32/Conv0_up/Noise/weight', # [512] + 'synthesis.layer6.epilogue.bias': '32x32/Conv0_up/bias', # [512] + 'synthesis.layer6.epilogue.style_mod.dense.linear.weight': '32x32/Conv0_up/StyleMod/weight', # [1024, 512] + 'synthesis.layer6.epilogue.style_mod.dense.wscale.bias': '32x32/Conv0_up/StyleMod/bias', # [1024] + 'synthesis.layer7.conv.weight': '32x32/Conv1/weight', # [512, 512, 3, 3] + 'synthesis.layer7.epilogue.apply_noise.weight': '32x32/Conv1/Noise/weight', # [512] + 'synthesis.layer7.epilogue.bias': '32x32/Conv1/bias', # [512] + 'synthesis.layer7.epilogue.style_mod.dense.linear.weight': '32x32/Conv1/StyleMod/weight', # [1024, 512] + 'synthesis.layer7.epilogue.style_mod.dense.wscale.bias': '32x32/Conv1/StyleMod/bias', # [1024] + 'synthesis.layer8.conv.weight': '64x64/Conv0_up/weight', # [256, 512, 3, 3] + 
'synthesis.layer8.epilogue.apply_noise.weight': '64x64/Conv0_up/Noise/weight', # [256] + 'synthesis.layer8.epilogue.bias': '64x64/Conv0_up/bias', # [256] + 'synthesis.layer8.epilogue.style_mod.dense.linear.weight': '64x64/Conv0_up/StyleMod/weight', # [512, 512] + 'synthesis.layer8.epilogue.style_mod.dense.wscale.bias': '64x64/Conv0_up/StyleMod/bias', # [512] + 'synthesis.layer9.conv.weight': '64x64/Conv1/weight', # [256, 256, 3, 3] + 'synthesis.layer9.epilogue.apply_noise.weight': '64x64/Conv1/Noise/weight', # [256] + 'synthesis.layer9.epilogue.bias': '64x64/Conv1/bias', # [256] + 'synthesis.layer9.epilogue.style_mod.dense.linear.weight': '64x64/Conv1/StyleMod/weight', # [512, 512] + 'synthesis.layer9.epilogue.style_mod.dense.wscale.bias': '64x64/Conv1/StyleMod/bias', # [512] + 'synthesis.layer10.conv.weight': '128x128/Conv0_up/weight', # [128, 256, 3, 3] + 'synthesis.layer10.epilogue.apply_noise.weight': '128x128/Conv0_up/Noise/weight', # [128] + 'synthesis.layer10.epilogue.bias': '128x128/Conv0_up/bias', # [128] + 'synthesis.layer10.epilogue.style_mod.dense.linear.weight': '128x128/Conv0_up/StyleMod/weight', # [256, 512] + 'synthesis.layer10.epilogue.style_mod.dense.wscale.bias': '128x128/Conv0_up/StyleMod/bias', # [256] + 'synthesis.layer11.conv.weight': '128x128/Conv1/weight', # [128, 128, 3, 3] + 'synthesis.layer11.epilogue.apply_noise.weight': '128x128/Conv1/Noise/weight', # [128] + 'synthesis.layer11.epilogue.bias': '128x128/Conv1/bias', # [128] + 'synthesis.layer11.epilogue.style_mod.dense.linear.weight': '128x128/Conv1/StyleMod/weight', # [256, 512] + 'synthesis.layer11.epilogue.style_mod.dense.wscale.bias': '128x128/Conv1/StyleMod/bias', # [256] + 'synthesis.layer12.conv.weight': '256x256/Conv0_up/weight', # [64, 128, 3, 3] + 'synthesis.layer12.epilogue.apply_noise.weight': '256x256/Conv0_up/Noise/weight', # [64] + 'synthesis.layer12.epilogue.bias': '256x256/Conv0_up/bias', # [64] + 'synthesis.layer12.epilogue.style_mod.dense.linear.weight': '256x256/Conv0_up/StyleMod/weight', # [128, 512] + 'synthesis.layer12.epilogue.style_mod.dense.wscale.bias': '256x256/Conv0_up/StyleMod/bias', # [128] + 'synthesis.layer13.conv.weight': '256x256/Conv1/weight', # [64, 64, 3, 3] + 'synthesis.layer13.epilogue.apply_noise.weight': '256x256/Conv1/Noise/weight', # [64] + 'synthesis.layer13.epilogue.bias': '256x256/Conv1/bias', # [64] + 'synthesis.layer13.epilogue.style_mod.dense.linear.weight': '256x256/Conv1/StyleMod/weight', # [128, 512] + 'synthesis.layer13.epilogue.style_mod.dense.wscale.bias': '256x256/Conv1/StyleMod/bias', # [128] + 'synthesis.layer14.conv.weight': '512x512/Conv0_up/weight', # [32, 64, 3, 3] + 'synthesis.layer14.epilogue.apply_noise.weight': '512x512/Conv0_up/Noise/weight', # [32] + 'synthesis.layer14.epilogue.bias': '512x512/Conv0_up/bias', # [32] + 'synthesis.layer14.epilogue.style_mod.dense.linear.weight': '512x512/Conv0_up/StyleMod/weight', # [64, 512] + 'synthesis.layer14.epilogue.style_mod.dense.wscale.bias': '512x512/Conv0_up/StyleMod/bias', # [64] + 'synthesis.layer15.conv.weight': '512x512/Conv1/weight', # [32, 32, 3, 3] + 'synthesis.layer15.epilogue.apply_noise.weight': '512x512/Conv1/Noise/weight', # [32] + 'synthesis.layer15.epilogue.bias': '512x512/Conv1/bias', # [32] + 'synthesis.layer15.epilogue.style_mod.dense.linear.weight': '512x512/Conv1/StyleMod/weight', # [64, 512] + 'synthesis.layer15.epilogue.style_mod.dense.wscale.bias': '512x512/Conv1/StyleMod/bias', # [64] + 'synthesis.layer16.conv.weight': '1024x1024/Conv0_up/weight', # [16, 32, 3, 3] + 
'synthesis.layer16.epilogue.apply_noise.weight': '1024x1024/Conv0_up/Noise/weight', # [16] + 'synthesis.layer16.epilogue.bias': '1024x1024/Conv0_up/bias', # [16] + 'synthesis.layer16.epilogue.style_mod.dense.linear.weight': '1024x1024/Conv0_up/StyleMod/weight', # [32, 512] + 'synthesis.layer16.epilogue.style_mod.dense.wscale.bias': '1024x1024/Conv0_up/StyleMod/bias', # [32] + 'synthesis.layer17.conv.weight': '1024x1024/Conv1/weight', # [16, 16, 3, 3] + 'synthesis.layer17.epilogue.apply_noise.weight': '1024x1024/Conv1/Noise/weight', # [16] + 'synthesis.layer17.epilogue.bias': '1024x1024/Conv1/bias', # [16] + 'synthesis.layer17.epilogue.style_mod.dense.linear.weight': '1024x1024/Conv1/StyleMod/weight', # [32, 512] + 'synthesis.layer17.epilogue.style_mod.dense.wscale.bias': '1024x1024/Conv1/StyleMod/bias', # [32] + 'synthesis.output0.conv.weight': 'ToRGB_lod8/weight', # [3, 512, 1, 1] + 'synthesis.output0.bias': 'ToRGB_lod8/bias', # [3] + 'synthesis.output1.conv.weight': 'ToRGB_lod7/weight', # [3, 512, 1, 1] + 'synthesis.output1.bias': 'ToRGB_lod7/bias', # [3] + 'synthesis.output2.conv.weight': 'ToRGB_lod6/weight', # [3, 512, 1, 1] + 'synthesis.output2.bias': 'ToRGB_lod6/bias', # [3] + 'synthesis.output3.conv.weight': 'ToRGB_lod5/weight', # [3, 512, 1, 1] + 'synthesis.output3.bias': 'ToRGB_lod5/bias', # [3] + 'synthesis.output4.conv.weight': 'ToRGB_lod4/weight', # [3, 256, 1, 1] + 'synthesis.output4.bias': 'ToRGB_lod4/bias', # [3] + 'synthesis.output5.conv.weight': 'ToRGB_lod3/weight', # [3, 128, 1, 1] + 'synthesis.output5.bias': 'ToRGB_lod3/bias', # [3] + 'synthesis.output6.conv.weight': 'ToRGB_lod2/weight', # [3, 64, 1, 1] + 'synthesis.output6.bias': 'ToRGB_lod2/bias', # [3] + 'synthesis.output7.conv.weight': 'ToRGB_lod1/weight', # [3, 32, 1, 1] + 'synthesis.output7.bias': 'ToRGB_lod1/bias', # [3] + 'synthesis.output8.conv.weight': 'ToRGB_lod0/weight', # [3, 16, 1, 1] + 'synthesis.output8.bias': 'ToRGB_lod0/bias', # [3] +} +# pylint: enable=line-too-long + +# Minimal resolution for `auto` fused-scale strategy. +_AUTO_FUSED_SCALE_MIN_RES = 128 + + +class StyleGANGeneratorModel(nn.Module): + """Defines the generator module in StyleGAN. + + Note that the generated images are with RGB color channels. + """ + + def __init__(self, + resolution=1024, + w_space_dim=512, + fused_scale='auto', + output_channels=3, + truncation_psi=0.7, + truncation_layers=8, + randomize_noise=False): + """Initializes the generator with basic settings. + + Args: + resolution: The resolution of the final output image. (default: 1024) + w_space_dim: The dimension of the disentangled latent vectors, w. + (default: 512) + fused_scale: If set as `True`, `conv2d_transpose` is used for upscaling. + If set as `False`, `upsample + conv2d` is used for upscaling. If set as + `auto`, `upsample + conv2d` is used for bottom layers until resolution + reaches 128. (default: `auto`) + output_channels: Number of channels of output image. (default: 3) + truncation_psi: Style strength multiplier for the truncation trick. + `None` or `1.0` indicates no truncation. (default: 0.7) + truncation_layers: Number of layers for which to apply the truncation + trick. `None` indicates no truncation. (default: 8) + randomize_noise: Whether to add random noise for each convolutional layer. + (default: False) + + Raises: + ValueError: If the input `resolution` is not supported. 
+ """ + super().__init__() + self.resolution = resolution + self.w_space_dim = w_space_dim + self.fused_scale = fused_scale + self.output_channels = output_channels + self.truncation_psi = truncation_psi + self.truncation_layers = truncation_layers + self.randomize_noise = randomize_noise + + self.mapping = MappingModule(final_space_dim=self.w_space_dim) + self.truncation = TruncationModule(resolution=self.resolution, + w_space_dim=self.w_space_dim, + truncation_psi=self.truncation_psi, + truncation_layers=self.truncation_layers) + self.synthesis = SynthesisModule(resolution=self.resolution, + fused_scale=self.fused_scale, + output_channels=self.output_channels, + randomize_noise=self.randomize_noise) + + self.pth_to_tf_var_mapping = {} + for pth_var_name, tf_var_name in _STYLEGAN_PTH_VARS_TO_TF_VARS.items(): + if 'Conv0_up' in tf_var_name: + res = int(tf_var_name.split('x')[0]) + if ((self.fused_scale is True) or + (self.fused_scale == 'auto' and res >= _AUTO_FUSED_SCALE_MIN_RES)): + pth_var_name = pth_var_name.replace('conv.weight', 'weight') + self.pth_to_tf_var_mapping[pth_var_name] = tf_var_name + + def forward(self, z): + w = self.mapping(z) + w = self.truncation(w) + x = self.synthesis(w) + return x + + +class MappingModule(nn.Sequential): + """Implements the latent space mapping module used in StyleGAN. + + Basically, this module executes several dense layers in sequence. + """ + + def __init__(self, + normalize_input=True, + input_space_dim=512, + hidden_space_dim=512, + final_space_dim=512, + num_layers=8): + sequence = OrderedDict() + + def _add_layer(layer, name=None): + name = name or f'dense{len(sequence) + (not normalize_input) - 1}' + sequence[name] = layer + + if normalize_input: + _add_layer(PixelNormLayer(), name='normalize') + for i in range(num_layers): + in_dim = input_space_dim if i == 0 else hidden_space_dim + out_dim = final_space_dim if i == (num_layers - 1) else hidden_space_dim + _add_layer(DenseBlock(in_dim, out_dim)) + super().__init__(sequence) + + def forward(self, x): + if len(x.shape) != 2: + raise ValueError(f'The input tensor should be with shape [batch_size, ' + f'noise_dim], but {x.shape} received!') + return super().forward(x) + + +class TruncationModule(nn.Module): + """Implements the truncation module used in StyleGAN.""" + + def __init__(self, + resolution=1024, + w_space_dim=512, + truncation_psi=0.7, + truncation_layers=8): + super().__init__() + + self.num_layers = int(np.log2(resolution)) * 2 - 2 + self.w_space_dim = w_space_dim + if truncation_psi is not None and truncation_layers is not None: + self.use_truncation = True + else: + self.use_truncation = False + truncation_psi = 1.0 + truncation_layers = 0 + self.register_buffer('w_avg', torch.zeros(w_space_dim)) + layer_idx = np.arange(self.num_layers).reshape(1, self.num_layers, 1) + coefs = np.ones_like(layer_idx, dtype=np.float32) + coefs[layer_idx < truncation_layers] *= truncation_psi + self.register_buffer('truncation', torch.from_numpy(coefs)) + + def forward(self, w): + if len(w.shape) == 2: + w = w.view(-1, 1, self.w_space_dim).repeat(1, self.num_layers, 1) + if self.use_truncation: + w_avg = self.w_avg.view(1, 1, self.w_space_dim) + w = w_avg + (w - w_avg) * self.truncation + return w + + +class SynthesisModule(nn.Module): + """Implements the image synthesis module used in StyleGAN. + + Basically, this module executes several convolutional layers in sequence. 
+ """ + + def __init__(self, + resolution=1024, + fused_scale='auto', + output_channels=3, + randomize_noise=False): + super().__init__() + + try: + self.channels = _RESOLUTIONS_TO_CHANNELS[resolution] + except KeyError: + raise ValueError(f'Invalid resolution: {resolution}!\n' + f'Resolutions allowed: ' + f'{list(_RESOLUTIONS_TO_CHANNELS)}.') + assert len(self.channels) == int(np.log2(resolution)) + + for block_idx in range(1, len(self.channels)): + if block_idx == 1: + self.add_module( + f'layer{2 * block_idx - 2}', + FirstConvBlock(in_channels=self.channels[block_idx - 1], + randomize_noise=randomize_noise)) + else: + self.add_module( + f'layer{2 * block_idx - 2}', + UpConvBlock(layer_idx=2 * block_idx - 2, + in_channels=self.channels[block_idx - 1], + out_channels=self.channels[block_idx], + randomize_noise=randomize_noise, + fused_scale=fused_scale)) + self.add_module( + f'layer{2 * block_idx - 1}', + ConvBlock(layer_idx=2 * block_idx - 1, + in_channels=self.channels[block_idx], + out_channels=self.channels[block_idx], + randomize_noise=randomize_noise)) + self.add_module( + f'output{block_idx - 1}', + LastConvBlock(in_channels=self.channels[block_idx], + out_channels=output_channels)) + + self.upsample = ResolutionScalingLayer() + self.lod = nn.Parameter(torch.zeros(())) + + def forward(self, w): + lod = self.lod.cpu().tolist() + x = self.layer0(w[:, 0]) + for block_idx in range(1, len(self.channels)): + if block_idx + lod < len(self.channels): + layer_idx = 2 * block_idx - 2 + if layer_idx == 0: + x = self.__getattr__(f'layer{layer_idx}')(w[:, layer_idx]) + else: + x = self.__getattr__(f'layer{layer_idx}')(x, w[:, layer_idx]) + layer_idx = 2 * block_idx - 1 + x = self.__getattr__(f'layer{layer_idx}')(x, w[:, layer_idx]) + image = self.__getattr__(f'output{block_idx - 1}')(x) + else: + image = self.upsample(image) + return image + + +class PixelNormLayer(nn.Module): + """Implements pixel-wise feature vector normalization layer.""" + + def __init__(self, epsilon=1e-8): + super().__init__() + self.epsilon = epsilon + + def forward(self, x): + return x / torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + self.epsilon) + + +class InstanceNormLayer(nn.Module): + """Implements instance normalization layer.""" + + def __init__(self, epsilon=1e-8): + super().__init__() + self.epsilon = epsilon + + def forward(self, x): + if len(x.shape) != 4: + raise ValueError(f'The input tensor should be with shape [batch_size, ' + f'num_channels, height, width], but {x.shape} received!') + x = x - torch.mean(x, dim=[2, 3], keepdim=True) + x = x / torch.sqrt(torch.mean(x**2, dim=[2, 3], keepdim=True) + + self.epsilon) + return x + + +class ResolutionScalingLayer(nn.Module): + """Implements the resolution scaling layer. + + Basically, this layer can be used to upsample or downsample feature maps from + spatial domain with nearest neighbor interpolation. 
+ """ + + def __init__(self, scale_factor=2): + super().__init__() + self.scale_factor = scale_factor + + def forward(self, x): + return F.interpolate(x, scale_factor=self.scale_factor, mode='nearest') + + +class BlurLayer(nn.Module): + """Implements the blur layer used in StyleGAN.""" + + def __init__(self, + channels, + kernel=(1, 2, 1), + normalize=True, + flip=False): + super().__init__() + kernel = np.array(kernel, dtype=np.float32).reshape(1, 3) + kernel = kernel.T.dot(kernel) + if normalize: + kernel /= np.sum(kernel) + if flip: + kernel = kernel[::-1, ::-1] + kernel = kernel.reshape(3, 3, 1, 1) + kernel = np.tile(kernel, [1, 1, channels, 1]) + kernel = np.transpose(kernel, [2, 3, 0, 1]) + self.register_buffer('kernel', torch.from_numpy(kernel)) + self.channels = channels + + def forward(self, x): + return F.conv2d(x, self.kernel, stride=1, padding=1, groups=self.channels) + + +class NoiseApplyingLayer(nn.Module): + """Implements the noise applying layer used in StyleGAN.""" + + def __init__(self, layer_idx, channels, randomize_noise=False): + super().__init__() + self.randomize_noise = randomize_noise + self.res = 2**(layer_idx // 2 + 2) + self.register_buffer('noise', torch.randn(1, 1, self.res, self.res)) + self.weight = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + if len(x.shape) != 4: + raise ValueError(f'The input tensor should be with shape [batch_size, ' + f'num_channels, height, width], but {x.shape} received!') + if self.randomize_noise: + noise = torch.randn(x.shape[0], 1, self.res, self.res).to(x) + else: + noise = self.noise + return x + noise * self.weight.view(1, -1, 1, 1) + + +class StyleModulationLayer(nn.Module): + """Implements the style modulation layer used in StyleGAN.""" + + def __init__(self, channels, w_space_dim=512): + super().__init__() + self.channels = channels + self.dense = DenseBlock(in_features=w_space_dim, + out_features=channels*2, + wscale_gain=1.0, + wscale_lr_multiplier=1.0, + activation_type='linear') + + def forward(self, x, w): + if len(w.shape) != 2: + raise ValueError(f'The input tensor should be with shape [batch_size, ' + f'num_channels], but {x.shape} received!') + style = self.dense(w) + style = style.view(-1, 2, self.channels, 1, 1) + return x * (style[:, 0] + 1) + style[:, 1] + + +class WScaleLayer(nn.Module): + """Implements the layer to scale weight variable and add bias. + + Note that, the weight variable is trained in `nn.Conv2d` layer (or `nn.Linear` + layer), and only scaled with a constant number , which is not trainable, in + this layer. However, the bias variable is trainable in this layer. 
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + gain=np.sqrt(2.0), + lr_multiplier=1.0): + super().__init__() + fan_in = in_channels * kernel_size * kernel_size + self.scale = gain / np.sqrt(fan_in) * lr_multiplier + self.bias = nn.Parameter(torch.zeros(out_channels)) + self.lr_multiplier = lr_multiplier + + def forward(self, x): + if len(x.shape) == 4: + return x * self.scale + self.bias.view(1, -1, 1, 1) * self.lr_multiplier + if len(x.shape) == 2: + return x * self.scale + self.bias.view(1, -1) * self.lr_multiplier + raise ValueError(f'The input tensor should be with shape [batch_size, ' + f'num_channels, height, width], or [batch_size, ' + f'num_channels], but {x.shape} received!') + + +class EpilogueBlock(nn.Module): + """Implements the epilogue block of each conv block.""" + + def __init__(self, + layer_idx, + channels, + randomize_noise=False, + normalization_fn='instance'): + super().__init__() + self.apply_noise = NoiseApplyingLayer(layer_idx, channels, randomize_noise) + self.bias = nn.Parameter(torch.zeros(channels)) + self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) + if normalization_fn == 'pixel': + self.norm = PixelNormLayer() + elif normalization_fn == 'instance': + self.norm = InstanceNormLayer() + else: + raise NotImplementedError(f'Not implemented normalization function: ' + f'{normalization_fn}!') + self.style_mod = StyleModulationLayer(channels) + + def forward(self, x, w): + x = self.apply_noise(x) + x = x + self.bias.view(1, -1, 1, 1) + x = self.activate(x) + x = self.norm(x) + x = self.style_mod(x, w) + return x + + +class FirstConvBlock(nn.Module): + """Implements the first convolutional block used in StyleGAN. + + Basically, this block starts from a const input, which is `ones(512, 4, 4)`. + """ + + def __init__(self, in_channels, randomize_noise=False): + super().__init__() + self.first_layer = nn.Parameter(torch.ones(1, in_channels, 4, 4)) + self.epilogue = EpilogueBlock(layer_idx=0, + channels=in_channels, + randomize_noise=randomize_noise) + + def forward(self, w): + x = self.first_layer.repeat(w.shape[0], 1, 1, 1) + x = self.epilogue(x, w) + return x + + +class UpConvBlock(nn.Module): + """Implements the convolutional block used in StyleGAN. + + Basically, this block is used as the first convolutional block for each + resolution, which will execute upsampling. + """ + + def __init__(self, + layer_idx, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + add_bias=False, + fused_scale='auto', + wscale_gain=np.sqrt(2.0), + wscale_lr_multiplier=1.0, + randomize_noise=False): + """Initializes the class with block settings. + + Args: + in_channels: Number of channels of the input tensor fed into this block. + out_channels: Number of channels (kernels) of the output tensor. + kernel_size: Size of the convolutional kernel. + stride: Stride parameter for convolution operation. + padding: Padding parameter for convolution operation. + dilation: Dilation rate for convolution operation. + add_bias: Whether to add bias onto the convolutional result. + fused_scale: Whether to fuse `upsample` and `conv2d` together, resulting + in `conv2d_transpose`. + wscale_gain: The gain factor for `wscale` layer. + wscale_lr_multiplier: The learning rate multiplier factor for `wscale` + layer. + randomize_noise: Whether to add random noise. + + Raises: + ValueError: If the block is not applied to the first block for a + particular resolution. Or `fused_scale` does not belong to [True, False, + `auto`]. 
+ """ + super().__init__() + if layer_idx % 2 == 1: + raise ValueError(f'This block is implemented as the first block of each ' + f'resolution, but is applied to layer {layer_idx}!') + if fused_scale not in [True, False, 'auto']: + raise ValueError(f'`fused_scale` can only be [True, False, `auto`], ' + f'but {fused_scale} received!') + + cur_res = 2 ** (layer_idx // 2 + 2) + if fused_scale == 'auto': + self.fused_scale = (cur_res >= _AUTO_FUSED_SCALE_MIN_RES) + else: + self.fused_scale = fused_scale + + if self.fused_scale: + self.weight = nn.Parameter( + torch.randn(kernel_size, kernel_size, in_channels, out_channels)) + + else: + self.upsample = ResolutionScalingLayer() + self.conv = nn.Conv2d(in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=1, + bias=add_bias) + + fan_in = in_channels * kernel_size * kernel_size + self.scale = wscale_gain / np.sqrt(fan_in) * wscale_lr_multiplier + self.blur = BlurLayer(channels=out_channels) + self.epilogue = EpilogueBlock(layer_idx=layer_idx, + channels=out_channels, + randomize_noise=randomize_noise) + + def forward(self, x, w): + if self.fused_scale: + kernel = self.weight * self.scale + kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0) + kernel = (kernel[1:, 1:] + kernel[:-1, 1:] + + kernel[1:, :-1] + kernel[:-1, :-1]) + kernel = kernel.permute(2, 3, 0, 1) + x = F.conv_transpose2d(x, kernel, stride=2, padding=1) + else: + x = self.upsample(x) + x = self.conv(x) * self.scale + x = self.blur(x) + x = self.epilogue(x, w) + return x + + +class ConvBlock(nn.Module): + """Implements the convolutional block used in StyleGAN. + + Basically, this block is used as the second convolutional block for each + resolution. + """ + + def __init__(self, + layer_idx, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + add_bias=False, + wscale_gain=np.sqrt(2.0), + wscale_lr_multiplier=1.0, + randomize_noise=False): + """Initializes the class with block settings. + + Args: + in_channels: Number of channels of the input tensor fed into this block. + out_channels: Number of channels (kernels) of the output tensor. + kernel_size: Size of the convolutional kernel. + stride: Stride parameter for convolution operation. + padding: Padding parameter for convolution operation. + dilation: Dilation rate for convolution operation. + add_bias: Whether to add bias onto the convolutional result. + wscale_gain: The gain factor for `wscale` layer. + wscale_lr_multiplier: The learning rate multiplier factor for `wscale` + layer. + randomize_noise: Whether to add random noise. + + Raises: + ValueError: If the block is not applied to the second block for a + particular resolution. 
+ """ + super().__init__() + if layer_idx % 2 == 0: + raise ValueError(f'This block is implemented as the second block of each ' + f'resolution, but is applied to layer {layer_idx}!') + + self.conv = nn.Conv2d(in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=1, + bias=add_bias) + fan_in = in_channels * kernel_size * kernel_size + self.scale = wscale_gain / np.sqrt(fan_in) * wscale_lr_multiplier + self.epilogue = EpilogueBlock(layer_idx=layer_idx, + channels=out_channels, + randomize_noise=randomize_noise) + + def forward(self, x, w): + x = self.conv(x) * self.scale + x = self.epilogue(x, w) + return x + + +class LastConvBlock(nn.Module): + """Implements the last convolutional block used in StyleGAN. + + Basically, this block converts the final feature map to RGB image. + """ + + def __init__(self, in_channels, out_channels=3): + super().__init__() + self.conv = nn.Conv2d(in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + bias=False) + self.scale = 1 / np.sqrt(in_channels) + self.bias = nn.Parameter(torch.zeros(3)) + + def forward(self, x): + x = self.conv(x) * self.scale + x = x + self.bias.view(1, -1, 1, 1) + return x + + +class DenseBlock(nn.Module): + """Implements the dense block used in StyleGAN. + + Basically, this block executes fully-connected layer, weight-scale layer, + and activation layer in sequence. + """ + + def __init__(self, + in_features, + out_features, + add_bias=False, + wscale_gain=np.sqrt(2.0), + wscale_lr_multiplier=0.01, + activation_type='lrelu'): + """Initializes the class with block settings. + + Args: + in_features: Number of channels of the input tensor fed into this block. + out_features: Number of channels of the output tensor. + add_bias: Whether to add bias onto the fully-connected result. + wscale_gain: The gain factor for `wscale` layer. + wscale_lr_multiplier: The learning rate multiplier factor for `wscale` + layer. + activation_type: Type of activation function. Support `linear` and + `lrelu`. + + Raises: + NotImplementedError: If the input `activation_type` is not supported. + """ + super().__init__() + self.linear = nn.Linear(in_features=in_features, + out_features=out_features, + bias=add_bias) + self.wscale = WScaleLayer(in_channels=in_features, + out_channels=out_features, + kernel_size=1, + gain=wscale_gain, + lr_multiplier=wscale_lr_multiplier) + if activation_type == 'linear': + self.activate = nn.Identity() + elif activation_type == 'lrelu': + self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) + else: + raise NotImplementedError(f'Not implemented activation function: ' + f'{activation_type}!') + + def forward(self, x): + x = self.linear(x) + x = self.wscale(x) + x = self.activate(x) + return x diff --git a/models/stylegan_tf_official/LICENSE.txt b/models/stylegan_tf_official/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca56419327bbeeb8094330497024f109bd52b96d --- /dev/null +++ b/models/stylegan_tf_official/LICENSE.txt @@ -0,0 +1,410 @@ +Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + + +Attribution-NonCommercial 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. 
Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + j. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + k. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + l. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. 
Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. 
retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. 
Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the "Licensor." The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/models/stylegan_tf_official/README.md b/models/stylegan_tf_official/README.md new file mode 100644 index 0000000000000000000000000000000000000000..187bd71422da54f148b5515e9bc5feebd7c2e79e --- /dev/null +++ b/models/stylegan_tf_official/README.md @@ -0,0 +1,234 @@ +## StyleGAN — Official TensorFlow Implementation +![Python 3.6](https://img.shields.io/badge/python-3.6-green.svg?style=plastic) +![TensorFlow 1.10](https://img.shields.io/badge/tensorflow-1.10-green.svg?style=plastic) +![cuDNN 7.3.1](https://img.shields.io/badge/cudnn-7.3.1-green.svg?style=plastic) +![License CC BY-NC](https://img.shields.io/badge/license-CC_BY--NC-green.svg?style=plastic) + +![Teaser image](./stylegan-teaser.png) +**Picture:** *These people are not real – they were produced by our generator that allows control over different aspects of the image.* + +This repository contains the official TensorFlow implementation of the following paper: + +> **A Style-Based Generator Architecture for Generative Adversarial Networks**
+> Tero Karras (NVIDIA), Samuli Laine (NVIDIA), Timo Aila (NVIDIA)
+> http://stylegan.xyz/paper +> +> **Abstract:** *We propose an alternative generator architecture for generative adversarial networks, borrowing from style transfer literature. The new architecture leads to an automatically learned, unsupervised separation of high-level attributes (e.g., pose and identity when trained on human faces) and stochastic variation in the generated images (e.g., freckles, hair), and it enables intuitive, scale-specific control of the synthesis. The new generator improves the state-of-the-art in terms of traditional distribution quality metrics, leads to demonstrably better interpolation properties, and also better disentangles the latent factors of variation. To quantify interpolation quality and disentanglement, we propose two new, automated methods that are applicable to any generator architecture. Finally, we introduce a new, highly varied and high-quality dataset of human faces.* + +For business inquiries, please contact [researchinquiries@nvidia.com](mailto:researchinquiries@nvidia.com) + +For press and other inquiries, please contact Hector Marinez at [hmarinez@nvidia.com](mailto:hmarinez@nvidia.com) + +## Resources + +All material related to our paper is available via the following links: + +| Link | Description +| :-------------- | :---------- +| http://stylegan.xyz/paper | Paper PDF. +| http://stylegan.xyz/video | Result video. +| http://stylegan.xyz/code | Source code. +| http://stylegan.xyz/ffhq | Flickr-Faces-HQ dataset. +| http://stylegan.xyz/drive | Google Drive folder. + +Additional material can be found in Google Drive folder: + +| Path | Description +| :--- | :---------- +| [StyleGAN](http://stylegan.xyz/drive) | Main folder. +| ├  [stylegan-paper.pdf](https://drive.google.com/open?id=1v-HkF3Ehrpon7wVIx4r5DLcko_U_V6Lt) | High-quality version of the paper PDF. +| ├  [stylegan-video.mp4](https://drive.google.com/open?id=1uzwkZHQX_9pYg1i0d1Nbe3D9xPO8-qBf) | High-quality version of the result video. +| ├  [images](https://drive.google.com/open?id=1-l46akONUWF6LCpDoeq63H53rD7MeiTd) | Example images produced using our generator. +| │  ├  [representative-images](https://drive.google.com/open?id=1ToY5P4Vvf5_c3TyUizQ8fckFFoFtBvD8) | High-quality images to be used in articles, blog posts, etc. +| │  └  [100k-generated-images](https://drive.google.com/open?id=100DJ0QXyG89HZzB4w2Cbyf4xjNK54cQ1) | 100,000 generated images for different amounts of truncation. +| │     ├  [ffhq-1024x1024](https://drive.google.com/open?id=14lm8VRN1pr4g_KVe6_LvyDX1PObst6d4) | Generated using Flickr-Faces-HQ dataset at 1024×1024. +| │     ├  [bedrooms-256x256](https://drive.google.com/open?id=1Vxz9fksw4kgjiHrvHkX4Hze4dyThFW6t) | Generated using LSUN Bedroom dataset at 256×256. +| │     ├  [cars-512x384](https://drive.google.com/open?id=1MFCvOMdLE2_mpeLPTiDw5dxc2CRuKkzS) | Generated using LSUN Car dataset at 512×384. +| │     └  [cats-256x256](https://drive.google.com/open?id=1gq-Gj3GRFiyghTPKhp8uDMA9HV_0ZFWQ) | Generated using LSUN Cat dataset at 256×256. +| ├  [videos](https://drive.google.com/open?id=1N8pOd_Bf8v89NGUaROdbD8-ayLPgyRRo) | Example videos produced using our generator. +| │  └  [high-quality-video-clips](https://drive.google.com/open?id=1NFO7_vH0t98J13ckJYFd7kuaTkyeRJ86) | Individual segments of the result video as high-quality MP4. +| ├  [ffhq-dataset](https://drive.google.com/open?id=1u2xu7bSrWxrbUxk-dT-UvEJq8IjdmNTP) | Raw data for the [Flickr-Faces-HQ dataset](http://stylegan.xyz/ffhq). 
+| └  [networks](https://drive.google.com/open?id=1MASQyN5m0voPcx7-9K0r5gObhvvPups7) | Pre-trained networks as pickled instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py). +|    ├  [stylegan-ffhq-1024x1024.pkl](https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ) | StyleGAN trained with Flickr-Faces-HQ dataset at 1024×1024. +|    ├  [stylegan-celebahq-1024x1024.pkl](https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf) | StyleGAN trained with CelebA-HQ dataset at 1024×1024. +|    ├  [stylegan-bedrooms-256x256.pkl](https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF) | StyleGAN trained with LSUN Bedroom dataset at 256×256. +|    ├  [stylegan-cars-512x384.pkl](https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3) | StyleGAN trained with LSUN Car dataset at 512×384. +|    ├  [stylegan-cats-256x256.pkl](https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ) | StyleGAN trained with LSUN Cat dataset at 256×256. +|    └  [metrics](https://drive.google.com/open?id=1MvYdWCBuMfnoYGptRH-AgKLbPTsIQLhl) | Auxiliary networks for the quality and disentanglement metrics. +|       ├  [inception_v3_features.pkl](https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn) | Standard [Inception-v3](https://arxiv.org/abs/1512.00567) classifier that outputs a raw feature vector. +|       ├  [vgg16_zhang_perceptual.pkl](https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2) | Standard [LPIPS](https://arxiv.org/abs/1801.03924) metric to estimate perceptual similarity. +|       ├  [celebahq-classifier-00-male.pkl](https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX) | Binary classifier trained to detect a single attribute of CelebA-HQ. +|       └ ⋯ | Please see the file listing for remaining networks. + +## Licenses + +All material, excluding the Flickr-Faces-HQ dataset, is made available under [Creative Commons BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) license by NVIDIA Corporation. You can **use, redistribute, and adapt** the material for **non-commercial purposes**, as long as you give appropriate credit by **citing our paper** and **indicating any changes** that you've made. + +For license information regarding the FFHQ dataset, please refer to the [Flickr-Faces-HQ repository](http://stylegan.xyz/ffhq). + +`inception_v3_features.pkl` and `inception_v3_softmax.pkl` are derived from the pre-trained [Inception-v3](https://arxiv.org/abs/1512.00567) network by Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. The network was originally shared under [Apache 2.0](https://github.com/tensorflow/models/blob/master/LICENSE) license on the [TensorFlow Models](https://github.com/tensorflow/models) repository. + +`vgg16.pkl` and `vgg16_zhang_perceptual.pkl` are derived from the pre-trained [VGG-16](https://arxiv.org/abs/1409.1556) network by Karen Simonyan and Andrew Zisserman. The network was originally shared under [Creative Commons BY 4.0](https://creativecommons.org/licenses/by/4.0/) license on the [Very Deep Convolutional Networks for Large-Scale Visual Recognition](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) project page. + +`vgg16_zhang_perceptual.pkl` is further derived from the pre-trained [LPIPS](https://arxiv.org/abs/1801.03924) weights by Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. 
The weights were originally shared under [BSD 2-Clause "Simplified" License](https://github.com/richzhang/PerceptualSimilarity/blob/master/LICENSE) on the [PerceptualSimilarity](https://github.com/richzhang/PerceptualSimilarity) repository. + +## System requirements + +* Both Linux and Windows are supported, but we strongly recommend Linux for performance and compatibility reasons. +* 64-bit Python 3.6 installation. We recommend Anaconda3 with numpy 1.14.3 or newer. +* TensorFlow 1.10.0 or newer with GPU support. +* One or more high-end NVIDIA GPUs with at least 11GB of DRAM. We recommend NVIDIA DGX-1 with 8 Tesla V100 GPUs. +* NVIDIA driver 391.35 or newer, CUDA toolkit 9.0 or newer, cuDNN 7.3.1 or newer. + +## Using pre-trained networks + +A minimal example of using a pre-trained StyleGAN generator is given in [pretrained_example.py](./pretrained_example.py). When executed, the script downloads a pre-trained StyleGAN generator from Google Drive and uses it to generate an image: + +``` +> python pretrained_example.py +Downloading https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ .... done + +Gs Params OutputShape WeightShape +--- --- --- --- +latents_in - (?, 512) - +... +images_out - (?, 3, 1024, 1024) - +--- --- --- --- +Total 26219627 + +> ls results +example.png # https://drive.google.com/uc?id=1UDLT_zb-rof9kKH0GwiJW_bS9MoZi8oP +``` + +A more advanced example is given in [generate_figures.py](./generate_figures.py). The script reproduces the figures from our paper in order to illustrate style mixing, noise inputs, and truncation: +``` +> python generate_figures.py +results/figure02-uncurated-ffhq.png # https://drive.google.com/uc?id=1U3r1xgcD7o-Fd0SBRpq8PXYajm7_30cu +results/figure03-style-mixing.png # https://drive.google.com/uc?id=1U-nlMDtpnf1RcYkaFQtbh5oxnhA97hy6 +results/figure04-noise-detail.png # https://drive.google.com/uc?id=1UX3m39u_DTU6eLnEW6MqGzbwPFt2R9cG +results/figure05-noise-components.png # https://drive.google.com/uc?id=1UQKPcvYVeWMRccGMbs2pPD9PVv1QDyp_ +results/figure08-truncation-trick.png # https://drive.google.com/uc?id=1ULea0C12zGlxdDQFNLXOWZCHi3QNfk_v +results/figure10-uncurated-bedrooms.png # https://drive.google.com/uc?id=1UEBnms1XMfj78OHj3_cx80mUf_m9DUJr +results/figure11-uncurated-cars.png # https://drive.google.com/uc?id=1UO-4JtAs64Kun5vIj10UXqAJ1d5Ir1Ke +results/figure12-uncurated-cats.png # https://drive.google.com/uc?id=1USnJc14prlu3QAYxstrtlfXC9sDWPA-W +``` + +The pre-trained networks are stored as standard pickle files on Google Drive: + +``` +# Load pre-trained network. +url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl +with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: + _G, _D, Gs = pickle.load(f) + # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run. + # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run. + # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot. +``` + +The above code downloads the file and unpickles it to yield 3 instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py). To generate images, you will typically want to use `Gs` – the other two networks are provided for completeness. In order for `pickle.load()` to work, you will need to have the `dnnlib` source directory in your PYTHONPATH and a `tf.Session` set as default. The session can initialized by calling `dnnlib.tflib.init_tf()`. 
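+
+For clarity, the setup order implied above can be sketched as follows. This is an illustrative snippet rather than part of the official scripts: the checkout path is a placeholder, and the URL is the FFHQ network from the table above.
+
+```
+# Illustrative setup sketch -- adjust the repository path for your environment.
+import sys
+import pickle
+
+sys.path.append('/path/to/stylegan')    # equivalent to putting the repo on PYTHONPATH
+import dnnlib
+import dnnlib.tflib as tflib
+import config
+
+tflib.init_tf()                         # registers the default tf.Session required for unpickling
+url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'  # karras2019stylegan-ffhq-1024x1024.pkl
+with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
+    _G, _D, Gs = pickle.load(f)         # fails if init_tf() has not been called first
+```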
+
+There are three ways to use the pre-trained generator:
+
+1. Use `Gs.run()` for immediate-mode operation where the inputs and outputs are numpy arrays:
+   ```
+   # Pick latent vector.
+   rnd = np.random.RandomState(5)
+   latents = rnd.randn(1, Gs.input_shape[1])
+
+   # Generate image.
+   fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
+   images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
+   ```
+   The first argument is a batch of latent vectors of shape `[num, 512]`. The second argument is reserved for class labels (not used by StyleGAN). The remaining keyword arguments are optional and can be used to further modify the operation (see below). The output is a batch of images, whose format is dictated by the `output_transform` argument.
+
+2. Use `Gs.get_output_for()` to incorporate the generator as a part of a larger TensorFlow expression:
+   ```
+   latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
+   images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True)
+   images = tflib.convert_images_to_uint8(images)
+   result_expr.append(inception_clone.get_output_for(images))
+   ```
+   The above code is from [metrics/frechet_inception_distance.py](./metrics/frechet_inception_distance.py). It generates a batch of random images and feeds them directly to the [Inception-v3](https://arxiv.org/abs/1512.00567) network without having to convert the data to numpy arrays in between.
+
+3. Look up `Gs.components.mapping` and `Gs.components.synthesis` to access individual sub-networks of the generator. Similar to `Gs`, the sub-networks are represented as independent instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py):
+   ```
+   src_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds)
+   src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component]
+   src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs)
+   ```
+   The above code is from [generate_figures.py](./generate_figures.py). It first transforms a batch of latent vectors into the intermediate *W* space using the mapping network and then turns these vectors into a batch of images using the synthesis network. The `dlatents` array stores a separate copy of the same *w* vector for each layer of the synthesis network to facilitate style mixing.
+
+The exact details of the generator are defined in [training/networks_stylegan.py](./training/networks_stylegan.py) (see `G_style`, `G_mapping`, and `G_synthesis`). The following keyword arguments can be specified to modify the behavior when calling `run()` and `get_output_for()`:
+
+* `truncation_psi` and `truncation_cutoff` control the truncation trick that is performed by default when using `Gs` (ψ=0.7, cutoff=8). It can be disabled by setting `truncation_psi=1` or `is_validation=True`, and the image quality can be further improved at the cost of variation by setting e.g. `truncation_psi=0.5`. Note that truncation is always disabled when using the sub-networks directly. The average *w* needed to manually perform the truncation trick can be looked up using `Gs.get_var('dlatent_avg')`.
+
+* `randomize_noise` determines whether to re-randomize the noise inputs for each generated image (`True`, default) or whether to use specific noise values for the entire minibatch (`False`).
The specific values can be accessed via the `tf.Variable` instances that are found using `[var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]`.
+
+* When using the mapping network directly, you can specify `dlatent_broadcast=None` to disable the automatic duplication of `dlatents` over the layers of the synthesis network.
+
+* Runtime performance can be fine-tuned via `structure='fixed'` and `dtype='float16'`. The former disables support for progressive growing, which is not needed for a fully-trained generator, and the latter performs all computation using half-precision floating point arithmetic.
+
+## Preparing datasets for training
+
+The training and evaluation scripts operate on datasets stored as multi-resolution TFRecords. Each dataset is represented by a directory containing the same image data in several resolutions to enable efficient streaming. There is a separate *.tfrecords file for each resolution, and if the dataset contains labels, they are stored in a separate file as well. By default, the scripts expect to find the datasets at `datasets/<name>/<name>-<resolution>.tfrecords`. The directory can be changed by editing [config.py](./config.py):
+
+```
+result_dir = 'results'
+data_dir = 'datasets'
+cache_dir = 'cache'
+```
+
+To obtain the FFHQ dataset (`datasets/ffhq`), please refer to the [Flickr-Faces-HQ repository](http://stylegan.xyz/ffhq).
+
+To obtain the CelebA-HQ dataset (`datasets/celebahq`), please refer to the [Progressive GAN repository](https://github.com/tkarras/progressive_growing_of_gans).
+
+To obtain other datasets, including LSUN, please consult their corresponding project pages. The datasets can be converted to multi-resolution TFRecords using the provided [dataset_tool.py](./dataset_tool.py):
+
+```
+> python dataset_tool.py create_lsun datasets/lsun-bedroom-full ~/lsun/bedroom_lmdb --resolution 256
+> python dataset_tool.py create_lsun_wide datasets/lsun-car-512x384 ~/lsun/car_lmdb --width 512 --height 384
+> python dataset_tool.py create_lsun datasets/lsun-cat-full ~/lsun/cat_lmdb --resolution 256
+> python dataset_tool.py create_cifar10 datasets/cifar10 ~/cifar10
+> python dataset_tool.py create_from_images datasets/custom-dataset ~/custom-images
+```
+
+## Training networks
+
+Once the datasets are set up, you can train your own StyleGAN networks as follows:
+
+1. Edit [train.py](./train.py) to specify the dataset and training configuration by uncommenting or editing specific lines.
+2. Run the training script with `python train.py`.
+3. The results are written to a newly created directory `results/<ID>-<DESCRIPTION>`.
+4. The training may take several days (or weeks) to complete, depending on the configuration.
+
+By default, `train.py` is configured to train the highest-quality StyleGAN (configuration F in Table 1) for the FFHQ dataset at 1024×1024 resolution using 8 GPUs. Please note that we have used 8 GPUs in all of our experiments. Training with fewer GPUs may not produce identical results – if you wish to compare against our technique, we strongly recommend using the same number of GPUs.
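+
+While a training run is in progress, intermediate snapshots can be checked with the same loading pattern shown earlier. The snippet below is an illustrative sketch rather than part of the official scripts; it assumes the run directory layout described above and that snapshots are written under `results/` as `network-snapshot-*.pkl`.
+
+```
+# Illustrative sketch: render one image from the most recent training snapshot.
+import glob
+import pickle
+import numpy as np
+import PIL.Image
+import dnnlib.tflib as tflib
+
+tflib.init_tf()
+snapshot = sorted(glob.glob('results/*/network-snapshot-*.pkl'))[-1]   # latest snapshot (assumed naming)
+with open(snapshot, 'rb') as f:
+    _G, _D, Gs = pickle.load(f)
+
+latents = np.random.RandomState(0).randn(1, Gs.input_shape[1])
+fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
+images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
+PIL.Image.fromarray(images[0], 'RGB').save('snapshot-check.png')
+```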
+ +Expected training times for the default configuration using Tesla V100 GPUs: + +| GPUs | 1024×1024 | 512×512 | 256×256 | +| :--- | :-------------- | :------------ | :------------ | +| 1 | 41 days 4 hours | 24 days 21 hours | 14 days 22 hours | +| 2 | 21 days 22 hours | 13 days 7 hours | 9 days 5 hours | +| 4 | 11 days 8 hours | 7 days 0 hours | 4 days 21 hours | +| 8 | 6 days 14 hours | 4 days 10 hours | 3 days 8 hours | + +## Evaluating quality and disentanglement + +The quality and disentanglement metrics used in our paper can be evaluated using [run_metrics.py](./run_metrics.py). By default, the script will evaluate the Fréchet Inception Distance (`fid50k`) for the pre-trained FFHQ generator and write the results into a newly created directory under `results`. The exact behavior can be changed by uncommenting or editing specific lines in [run_metrics.py](./run_metrics.py). + +Expected evaluation time and results for the pre-trained FFHQ generator using one Tesla V100 GPU: + +| Metric | Time | Result | Description +| :----- | :--- | :----- | :---------- +| fid50k | 16 min | 4.4159 | Fréchet Inception Distance using 50,000 images. +| ppl_zfull | 55 min | 664.8854 | Perceptual Path Length for full paths in *Z*. +| ppl_wfull | 55 min | 233.3059 | Perceptual Path Length for full paths in *W*. +| ppl_zend | 55 min | 666.1057 | Perceptual Path Length for path endpoints in *Z*. +| ppl_wend | 55 min | 197.2266 | Perceptual Path Length for path endpoints in *W*. +| ls | 10 hours | z: 165.0106
w: 3.7447 | Linear Separability in *Z* and *W*. + +Please note that the exact results may vary from run to run due to the non-deterministic nature of TensorFlow. + +## Acknowledgements + +We thank Jaakko Lehtinen, David Luebke, and Tuomas Kynkäänniemi for in-depth discussions and helpful comments; Janne Hellsten, Tero Kuosmanen, and Pekka Jänis for compute infrastructure and help with the code release. diff --git a/models/stylegan_tf_official/config.py b/models/stylegan_tf_official/config.py new file mode 100644 index 0000000000000000000000000000000000000000..4eb9b33526c78b63cf4e36d8ea5ab39ab34f3808 --- /dev/null +++ b/models/stylegan_tf_official/config.py @@ -0,0 +1,18 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Global configuration.""" + +#---------------------------------------------------------------------------- +# Paths. + +result_dir = 'results' +data_dir = 'datasets' +cache_dir = 'cache' +run_dir_ignore = ['results', 'datasets', 'cache'] + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/dataset_tool.py b/models/stylegan_tf_official/dataset_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..4ddfe448e2ccaa30e04ad4b49761d406846c962f --- /dev/null +++ b/models/stylegan_tf_official/dataset_tool.py @@ -0,0 +1,645 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
+ +"""Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN.""" + +# pylint: disable=too-many-lines +import os +import sys +import glob +import argparse +import threading +import six.moves.queue as Queue # pylint: disable=import-error +import traceback +import numpy as np +import tensorflow as tf +import PIL.Image +import dnnlib.tflib as tflib + +from training import dataset + +#---------------------------------------------------------------------------- + +def error(msg): + print('Error: ' + msg) + exit(1) + +#---------------------------------------------------------------------------- + +class TFRecordExporter: + def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10): + self.tfrecord_dir = tfrecord_dir + self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir)) + self.expected_images = expected_images + self.cur_images = 0 + self.shape = None + self.resolution_log2 = None + self.tfr_writers = [] + self.print_progress = print_progress + self.progress_interval = progress_interval + + if self.print_progress: + print('Creating dataset "%s"' % tfrecord_dir) + if not os.path.isdir(self.tfrecord_dir): + os.makedirs(self.tfrecord_dir) + assert os.path.isdir(self.tfrecord_dir) + + def close(self): + if self.print_progress: + print('%-40s\r' % 'Flushing data...', end='', flush=True) + for tfr_writer in self.tfr_writers: + tfr_writer.close() + self.tfr_writers = [] + if self.print_progress: + print('%-40s\r' % '', end='', flush=True) + print('Added %d images.' % self.cur_images) + + def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order. + order = np.arange(self.expected_images) + np.random.RandomState(123).shuffle(order) + return order + + def add_image(self, img): + if self.print_progress and self.cur_images % self.progress_interval == 0: + print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True) + if self.shape is None: + self.shape = img.shape + self.resolution_log2 = int(np.log2(self.shape[1])) + assert self.shape[0] in [1, 3] + assert self.shape[1] == self.shape[2] + assert self.shape[1] == 2**self.resolution_log2 + tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE) + for lod in range(self.resolution_log2 - 1): + tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod) + self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt)) + assert img.shape == self.shape + for lod, tfr_writer in enumerate(self.tfr_writers): + if lod: + img = img.astype(np.float32) + img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25 + quant = np.rint(img).clip(0, 255).astype(np.uint8) + ex = tf.train.Example(features=tf.train.Features(feature={ + 'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)), + 'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))})) + tfr_writer.write(ex.SerializeToString()) + self.cur_images += 1 + + def add_labels(self, labels): + if self.print_progress: + print('%-40s\r' % 'Saving labels...', end='', flush=True) + assert labels.shape[0] == self.cur_images + with open(self.tfr_prefix + '-rxx.labels', 'wb') as f: + np.save(f, labels.astype(np.float32)) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + +#---------------------------------------------------------------------------- + +class ExceptionInfo(object): + def __init__(self): + self.value = 
sys.exc_info()[1] + self.traceback = traceback.format_exc() + +#---------------------------------------------------------------------------- + +class WorkerThread(threading.Thread): + def __init__(self, task_queue): + threading.Thread.__init__(self) + self.task_queue = task_queue + + def run(self): + while True: + func, args, result_queue = self.task_queue.get() + if func is None: + break + try: + result = func(*args) + except: + result = ExceptionInfo() + result_queue.put((result, args)) + +#---------------------------------------------------------------------------- + +class ThreadPool(object): + def __init__(self, num_threads): + assert num_threads >= 1 + self.task_queue = Queue.Queue() + self.result_queues = dict() + self.num_threads = num_threads + for _idx in range(self.num_threads): + thread = WorkerThread(self.task_queue) + thread.daemon = True + thread.start() + + def add_task(self, func, args=()): + assert hasattr(func, '__call__') # must be a function + if func not in self.result_queues: + self.result_queues[func] = Queue.Queue() + self.task_queue.put((func, args, self.result_queues[func])) + + def get_result(self, func): # returns (result, args) + result, args = self.result_queues[func].get() + if isinstance(result, ExceptionInfo): + print('\n\nWorker thread caught an exception:\n' + result.traceback) + raise result.value + return result, args + + def finish(self): + for _idx in range(self.num_threads): + self.task_queue.put((None, (), None)) + + def __enter__(self): # for 'with' statement + return self + + def __exit__(self, *excinfo): + self.finish() + + def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None): + if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4 + assert max_items_in_flight >= 1 + results = [] + retire_idx = [0] + + def task_func(prepared, _idx): + return process_func(prepared) + + def retire_result(): + processed, (_prepared, idx) = self.get_result(task_func) + results[idx] = processed + while retire_idx[0] < len(results) and results[retire_idx[0]] is not None: + yield post_func(results[retire_idx[0]]) + results[retire_idx[0]] = None + retire_idx[0] += 1 + + for idx, item in enumerate(item_iterator): + prepared = pre_func(item) + results.append(None) + self.add_task(func=task_func, args=(prepared, idx)) + while retire_idx[0] < idx - max_items_in_flight + 2: + for res in retire_result(): yield res + while retire_idx[0] < len(results): + for res in retire_result(): yield res + +#---------------------------------------------------------------------------- + +def display(tfrecord_dir): + print('Loading dataset "%s"' % tfrecord_dir) + tflib.init_tf({'gpu_options.allow_growth': True}) + dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0) + tflib.init_uninitialized_vars() + import cv2 # pip install opencv-python + + idx = 0 + while True: + try: + images, labels = dset.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + break + if idx == 0: + print('Displaying images') + cv2.namedWindow('dataset_tool') + print('Press SPACE or ENTER to advance, ESC to exit') + print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist())) + cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR + idx += 1 + if cv2.waitKey() == 27: + break + print('\nDisplayed %d images.' 
% idx) + +#---------------------------------------------------------------------------- + +def extract(tfrecord_dir, output_dir): + print('Loading dataset "%s"' % tfrecord_dir) + tflib.init_tf({'gpu_options.allow_growth': True}) + dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0) + tflib.init_uninitialized_vars() + + print('Extracting images to "%s"' % output_dir) + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + idx = 0 + while True: + if idx % 10 == 0: + print('%d\r' % idx, end='', flush=True) + try: + images, _labels = dset.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + break + if images.shape[1] == 1: + img = PIL.Image.fromarray(images[0][0], 'L') + else: + img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB') + img.save(os.path.join(output_dir, 'img%08d.png' % idx)) + idx += 1 + print('Extracted %d images.' % idx) + +#---------------------------------------------------------------------------- + +def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels): + max_label_size = 0 if ignore_labels else 'full' + print('Loading dataset "%s"' % tfrecord_dir_a) + tflib.init_tf({'gpu_options.allow_growth': True}) + dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0) + print('Loading dataset "%s"' % tfrecord_dir_b) + dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0) + tflib.init_uninitialized_vars() + + print('Comparing datasets') + idx = 0 + identical_images = 0 + identical_labels = 0 + while True: + if idx % 100 == 0: + print('%d\r' % idx, end='', flush=True) + try: + images_a, labels_a = dset_a.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + images_a, labels_a = None, None + try: + images_b, labels_b = dset_b.get_minibatch_np(1) + except tf.errors.OutOfRangeError: + images_b, labels_b = None, None + if images_a is None or images_b is None: + if images_a is not None or images_b is not None: + print('Datasets contain different number of images') + break + if images_a.shape == images_b.shape and np.all(images_a == images_b): + identical_images += 1 + else: + print('Image %d is different' % idx) + if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b): + identical_labels += 1 + else: + print('Label %d is different' % idx) + idx += 1 + print('Identical images: %d / %d' % (identical_images, idx)) + if not ignore_labels: + print('Identical labels: %d / %d' % (identical_labels, idx)) + +#---------------------------------------------------------------------------- + +def create_mnist(tfrecord_dir, mnist_dir): + print('Loading MNIST from "%s"' % mnist_dir) + import gzip + with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file: + images = np.frombuffer(file.read(), np.uint8, offset=16) + with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file: + labels = np.frombuffer(file.read(), np.uint8, offset=8) + images = images.reshape(-1, 1, 28, 28) + images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0) + assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (60000,) and labels.dtype == np.uint8 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) 
as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123): + print('Loading MNIST from "%s"' % mnist_dir) + import gzip + with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file: + images = np.frombuffer(file.read(), np.uint8, offset=16) + images = images.reshape(-1, 28, 28) + images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0) + assert images.shape == (60000, 32, 32) and images.dtype == np.uint8 + assert np.min(images) == 0 and np.max(images) == 255 + + with TFRecordExporter(tfrecord_dir, num_images) as tfr: + rnd = np.random.RandomState(random_seed) + for _idx in range(num_images): + tfr.add_image(images[rnd.randint(images.shape[0], size=3)]) + +#---------------------------------------------------------------------------- + +def create_cifar10(tfrecord_dir, cifar10_dir): + print('Loading CIFAR-10 from "%s"' % cifar10_dir) + import pickle + images = [] + labels = [] + for batch in range(1, 6): + with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file: + data = pickle.load(file, encoding='latin1') + images.append(data['data'].reshape(-1, 3, 32, 32)) + labels.append(data['labels']) + images = np.concatenate(images) + labels = np.concatenate(labels) + assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (50000,) and labels.dtype == np.int32 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_cifar100(tfrecord_dir, cifar100_dir): + print('Loading CIFAR-100 from "%s"' % cifar100_dir) + import pickle + with open(os.path.join(cifar100_dir, 'train'), 'rb') as file: + data = pickle.load(file, encoding='latin1') + images = data['data'].reshape(-1, 3, 32, 32) + labels = np.array(data['fine_labels']) + assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (50000,) and labels.dtype == np.int32 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 99 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_svhn(tfrecord_dir, svhn_dir): + print('Loading SVHN from "%s"' % svhn_dir) + import pickle + images = [] + labels = [] + for batch in range(1, 4): + with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file: + data = pickle.load(file, encoding='latin1') + images.append(data[0]) + labels.append(data[1]) + images = np.concatenate(images) + labels = np.concatenate(labels) + assert images.shape == (73257, 3, 32, 32) 
and images.dtype == np.uint8 + assert labels.shape == (73257,) and labels.dtype == np.uint8 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) + onehot[np.arange(labels.size), labels] = 1.0 + + with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + tfr.add_image(images[order[idx]]) + tfr.add_labels(onehot[order]) + +#---------------------------------------------------------------------------- + +def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None): + print('Loading LSUN dataset from "%s"' % lmdb_dir) + import lmdb # pip install lmdb # pylint: disable=import-error + import cv2 # pip install opencv-python + import io + with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn: + total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter + if max_images is None: + max_images = total_images + with TFRecordExporter(tfrecord_dir, max_images) as tfr: + for _idx, (_key, value) in enumerate(txn.cursor()): + try: + try: + img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1) + if img is None: + raise IOError('cv2.imdecode failed') + img = img[:, :, ::-1] # BGR => RGB + except IOError: + img = np.asarray(PIL.Image.open(io.BytesIO(value))) + crop = np.min(img.shape[:2]) + img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2] + img = PIL.Image.fromarray(img, 'RGB') + img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS) + img = np.asarray(img) + img = img.transpose([2, 0, 1]) # HWC => CHW + tfr.add_image(img) + except: + print(sys.exc_info()[1]) + if tfr.cur_images == max_images: + break + +#---------------------------------------------------------------------------- + +def create_lsun_wide(tfrecord_dir, lmdb_dir, width=512, height=384, max_images=None): + assert width == 2 ** int(np.round(np.log2(width))) + assert height <= width + print('Loading LSUN dataset from "%s"' % lmdb_dir) + import lmdb # pip install lmdb # pylint: disable=import-error + import cv2 # pip install opencv-python + import io + with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn: + total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter + if max_images is None: + max_images = total_images + with TFRecordExporter(tfrecord_dir, max_images, print_progress=False) as tfr: + for idx, (_key, value) in enumerate(txn.cursor()): + try: + try: + img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1) + if img is None: + raise IOError('cv2.imdecode failed') + img = img[:, :, ::-1] # BGR => RGB + except IOError: + img = np.asarray(PIL.Image.open(io.BytesIO(value))) + + ch = int(np.round(width * img.shape[0] / img.shape[1])) + if img.shape[1] < width or ch < height: + continue + + img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2] + img = PIL.Image.fromarray(img, 'RGB') + img = img.resize((width, height), PIL.Image.ANTIALIAS) + img = np.asarray(img) + img = img.transpose([2, 0, 1]) # HWC => CHW + + canvas = np.zeros([3, width, width], dtype=np.uint8) + canvas[:, (width - height) // 2 : (width + height) // 2] = img + tfr.add_image(canvas) + print('\r%d / %d => %d ' % (idx + 1, total_images, tfr.cur_images), end='') + + except: + print(sys.exc_info()[1]) + if tfr.cur_images == max_images: + break + print() + 
+#---------------------------------------------------------------------------- + +def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121): + print('Loading CelebA from "%s"' % celeba_dir) + glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png') + image_filenames = sorted(glob.glob(glob_pattern)) + expected_images = 202599 + if len(image_filenames) != expected_images: + error('Expected to find %d images' % expected_images) + + with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr: + order = tfr.choose_shuffled_order() + for idx in range(order.size): + img = np.asarray(PIL.Image.open(image_filenames[order[idx]])) + assert img.shape == (218, 178, 3) + img = img[cy - 64 : cy + 64, cx - 64 : cx + 64] + img = img.transpose(2, 0, 1) # HWC => CHW + tfr.add_image(img) + +#---------------------------------------------------------------------------- + +def create_from_images(tfrecord_dir, image_dir, shuffle): + print('Loading images from "%s"' % image_dir) + image_filenames = sorted(glob.glob(os.path.join(image_dir, '*'))) + if len(image_filenames) == 0: + error('No input images found') + + img = np.asarray(PIL.Image.open(image_filenames[0])) + resolution = img.shape[0] + channels = img.shape[2] if img.ndim == 3 else 1 + if img.shape[1] != resolution: + error('Input images must have the same width and height') + if resolution != 2 ** int(np.floor(np.log2(resolution))): + error('Input image resolution must be a power-of-two') + if channels not in [1, 3]: + error('Input images must be stored as RGB or grayscale') + + with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr: + order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames)) + for idx in range(order.size): + img = np.asarray(PIL.Image.open(image_filenames[order[idx]])) + if channels == 1: + img = img[np.newaxis, :, :] # HW => CHW + else: + img = img.transpose([2, 0, 1]) # HWC => CHW + tfr.add_image(img) + +#---------------------------------------------------------------------------- + +def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle): + print('Loading HDF5 archive from "%s"' % hdf5_filename) + import h5py # conda install h5py + with h5py.File(hdf5_filename, 'r') as hdf5_file: + hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3]) + with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr: + order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0]) + for idx in range(order.size): + tfr.add_image(hdf5_data[order[idx]]) + npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy' + if os.path.isfile(npy_filename): + tfr.add_labels(np.load(npy_filename)[order]) + +#---------------------------------------------------------------------------- + +def execute_cmdline(argv): + prog = argv[0] + parser = argparse.ArgumentParser( + prog = prog, + description = 'Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN.', + epilog = 'Type "%s -h" for more information.' 
% prog) + + subparsers = parser.add_subparsers(dest='command') + subparsers.required = True + def add_command(cmd, desc, example=None): + epilog = 'Example: %s %s' % (prog, example) if example is not None else None + return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog) + + p = add_command( 'display', 'Display images in dataset.', + 'display datasets/mnist') + p.add_argument( 'tfrecord_dir', help='Directory containing dataset') + + p = add_command( 'extract', 'Extract images from dataset.', + 'extract datasets/mnist mnist-images') + p.add_argument( 'tfrecord_dir', help='Directory containing dataset') + p.add_argument( 'output_dir', help='Directory to extract the images into') + + p = add_command( 'compare', 'Compare two datasets.', + 'compare datasets/mydataset datasets/mnist') + p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset') + p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset') + p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0) + + p = add_command( 'create_mnist', 'Create dataset for MNIST.', + 'create_mnist datasets/mnist ~/downloads/mnist') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'mnist_dir', help='Directory containing MNIST') + + p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.', + 'create_mnistrgb datasets/mnistrgb ~/downloads/mnist') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'mnist_dir', help='Directory containing MNIST') + p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000) + p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123) + + p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.', + 'create_cifar10 datasets/cifar10 ~/downloads/cifar10') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10') + + p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.', + 'create_cifar100 datasets/cifar100 ~/downloads/cifar100') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100') + + p = add_command( 'create_svhn', 'Create dataset for SVHN.', + 'create_svhn datasets/svhn ~/downloads/svhn') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'svhn_dir', help='Directory containing SVHN') + + p = add_command( 'create_lsun', 'Create dataset for single LSUN category.', + 'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'lmdb_dir', help='Directory containing LMDB database') + p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256) + p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None) + + p = add_command( 'create_lsun_wide', 'Create LSUN dataset with non-square aspect ratio.', + 'create_lsun_wide datasets/lsun-car-512x384 ~/downloads/lsun/car_lmdb --width 512 --height 384') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'lmdb_dir', help='Directory containing LMDB database') + p.add_argument( '--width', 
help='Output width (default: 512)', type=int, default=512) + p.add_argument( '--height', help='Output height (default: 384)', type=int, default=384) + p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None) + + p = add_command( 'create_celeba', 'Create dataset for CelebA.', + 'create_celeba datasets/celeba ~/downloads/celeba') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'celeba_dir', help='Directory containing CelebA') + p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89) + p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121) + + p = add_command( 'create_from_images', 'Create dataset from a directory full of images.', + 'create_from_images datasets/mydataset myimagedir') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'image_dir', help='Directory containing the images') + p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1) + + p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.', + 'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5') + p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') + p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images') + p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1) + + args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h']) + func = globals()[args.command] + del args.command + func(**vars(args)) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + execute_cmdline(sys.argv) + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/dnnlib/__init__.py b/models/stylegan_tf_official/dnnlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad43827d8a279c4a797e09b51b8fd96e8e003ee6 --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +from . import submission + +from .submission.run_context import RunContext + +from .submission.submit import SubmitTarget +from .submission.submit import PathType +from .submission.submit import SubmitConfig +from .submission.submit import get_path_from_template +from .submission.submit import submit_run + +from .util import EasyDict + +submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function. diff --git a/models/stylegan_tf_official/dnnlib/submission/__init__.py b/models/stylegan_tf_official/dnnlib/submission/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53856121d673459ae2b21ecef3d0fcb12a12cdfe --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/submission/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. 
To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +from . import run_context +from . import submit diff --git a/models/stylegan_tf_official/dnnlib/submission/_internal/run.py b/models/stylegan_tf_official/dnnlib/submission/_internal/run.py new file mode 100644 index 0000000000000000000000000000000000000000..18f830d81ead15fece09382cc30654fb89d14d1b --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/submission/_internal/run.py @@ -0,0 +1,45 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Helper for launching run functions in computing clusters. + +During the submit process, this file is copied to the appropriate run dir. +When the job is launched in the cluster, this module is the first thing that +is run inside the docker container. +""" + +import os +import pickle +import sys + +# PYTHONPATH should have been set so that the run_dir/src is in it +import dnnlib + +def main(): + if not len(sys.argv) >= 4: + raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!") + + run_dir = str(sys.argv[1]) + task_name = str(sys.argv[2]) + host_name = str(sys.argv[3]) + + submit_config_path = os.path.join(run_dir, "submit_config.pkl") + + # SubmitConfig should have been pickled to the run dir + if not os.path.exists(submit_config_path): + raise RuntimeError("SubmitConfig pickle file does not exist!") + + submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb")) + dnnlib.submission.submit.set_user_name_override(submit_config.user_name) + + submit_config.task_name = task_name + submit_config.host_name = host_name + + dnnlib.submission.submit.run_wrapper(submit_config) + +if __name__ == "__main__": + main() diff --git a/models/stylegan_tf_official/dnnlib/submission/run_context.py b/models/stylegan_tf_official/dnnlib/submission/run_context.py new file mode 100644 index 0000000000000000000000000000000000000000..932320e4735bde1b547ac6062b175601b7959547 --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/submission/run_context.py @@ -0,0 +1,99 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Helpers for managing the run/training loop.""" + +import datetime +import json +import os +import pprint +import time +import types + +from typing import Any + +from . import submit + + +class RunContext(object): + """Helper class for managing the run/training loop. + + The context will hide the implementation details of a basic run/training loop. + It will set things up properly, tell if run should be stopped, and then cleans up. + User should call update periodically and use should_stop to determine if run should be stopped. + + Args: + submit_config: The SubmitConfig that is used for the current run. + config_module: The whole config module that is used for the current run. 
+ max_epoch: Optional cached value for the max_epoch variable used in update. + """ + + def __init__(self, submit_config: submit.SubmitConfig, config_module: types.ModuleType = None, max_epoch: Any = None): + self.submit_config = submit_config + self.should_stop_flag = False + self.has_closed = False + self.start_time = time.time() + self.last_update_time = time.time() + self.last_update_interval = 0.0 + self.max_epoch = max_epoch + + # pretty print the all the relevant content of the config module to a text file + if config_module is not None: + with open(os.path.join(submit_config.run_dir, "config.txt"), "w") as f: + filtered_dict = {k: v for k, v in config_module.__dict__.items() if not k.startswith("_") and not isinstance(v, (types.ModuleType, types.FunctionType, types.LambdaType, submit.SubmitConfig, type))} + pprint.pprint(filtered_dict, stream=f, indent=4, width=200, compact=False) + + # write out details about the run to a text file + self.run_txt_data = {"task_name": submit_config.task_name, "host_name": submit_config.host_name, "start_time": datetime.datetime.now().isoformat(sep=" ")} + with open(os.path.join(submit_config.run_dir, "run.txt"), "w") as f: + pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False) + + def __enter__(self) -> "RunContext": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self.close() + + def update(self, loss: Any = 0, cur_epoch: Any = 0, max_epoch: Any = None) -> None: + """Do general housekeeping and keep the state of the context up-to-date. + Should be called often enough but not in a tight loop.""" + assert not self.has_closed + + self.last_update_interval = time.time() - self.last_update_time + self.last_update_time = time.time() + + if os.path.exists(os.path.join(self.submit_config.run_dir, "abort.txt")): + self.should_stop_flag = True + + max_epoch_val = self.max_epoch if max_epoch is None else max_epoch + + def should_stop(self) -> bool: + """Tell whether a stopping condition has been triggered one way or another.""" + return self.should_stop_flag + + def get_time_since_start(self) -> float: + """How much time has passed since the creation of the context.""" + return time.time() - self.start_time + + def get_time_since_last_update(self) -> float: + """How much time has passed since the last call to update.""" + return time.time() - self.last_update_time + + def get_last_update_interval(self) -> float: + """How much time passed between the previous two calls to update.""" + return self.last_update_interval + + def close(self) -> None: + """Close the context and clean up. + Should only be called once.""" + if not self.has_closed: + # update the run.txt with stopping time + self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ") + with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f: + pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False) + + self.has_closed = True diff --git a/models/stylegan_tf_official/dnnlib/submission/submit.py b/models/stylegan_tf_official/dnnlib/submission/submit.py new file mode 100644 index 0000000000000000000000000000000000000000..60ff428717c13896bb78625b3eaf651d9fb9695d --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/submission/submit.py @@ -0,0 +1,290 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. 
To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Submit a function to be run either locally or in a computing cluster.""" + +import copy +import io +import os +import pathlib +import pickle +import platform +import pprint +import re +import shutil +import time +import traceback + +import zipfile + +from enum import Enum + +from .. import util +from ..util import EasyDict + + +class SubmitTarget(Enum): + """The target where the function should be run. + + LOCAL: Run it locally. + """ + LOCAL = 1 + + +class PathType(Enum): + """Determines in which format should a path be formatted. + + WINDOWS: Format with Windows style. + LINUX: Format with Linux/Posix style. + AUTO: Use current OS type to select either WINDOWS or LINUX. + """ + WINDOWS = 1 + LINUX = 2 + AUTO = 3 + + +_user_name_override = None + + +class SubmitConfig(util.EasyDict): + """Strongly typed config dict needed to submit runs. + + Attributes: + run_dir_root: Path to the run dir root. Can be optionally templated with tags. Needs to always be run through get_path_from_template. + run_desc: Description of the run. Will be used in the run dir and task name. + run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir. + run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will be the src directory inside the run dir. + submit_target: Submit target enum value. Used to select where the run is actually launched. + num_gpus: Number of GPUs used/requested for the run. + print_info: Whether to print debug information when submitting. + ask_confirmation: Whether to ask a confirmation before submitting. + run_id: Automatically populated value during submit. + run_name: Automatically populated value during submit. + run_dir: Automatically populated value during submit. + run_func_name: Automatically populated value during submit. + run_func_kwargs: Automatically populated value during submit. + user_name: Automatically populated value during submit. Can be set by the user which will then override the automatic value. + task_name: Automatically populated value during submit. + host_name: Automatically populated value during submit. 
+ """ + + def __init__(self): + super().__init__() + + # run (set these) + self.run_dir_root = "" # should always be passed through get_path_from_template + self.run_desc = "" + self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode"] + self.run_dir_extra_files = None + + # submit (set these) + self.submit_target = SubmitTarget.LOCAL + self.num_gpus = 1 + self.print_info = False + self.ask_confirmation = False + + # (automatically populated) + self.run_id = None + self.run_name = None + self.run_dir = None + self.run_func_name = None + self.run_func_kwargs = None + self.user_name = None + self.task_name = None + self.host_name = "localhost" + + +def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str: + """Replace tags in the given path template and return either Windows or Linux formatted path.""" + # automatically select path type depending on running OS + if path_type == PathType.AUTO: + if platform.system() == "Windows": + path_type = PathType.WINDOWS + elif platform.system() == "Linux": + path_type = PathType.LINUX + else: + raise RuntimeError("Unknown platform") + + path_template = path_template.replace("", get_user_name()) + + # return correctly formatted path + if path_type == PathType.WINDOWS: + return str(pathlib.PureWindowsPath(path_template)) + elif path_type == PathType.LINUX: + return str(pathlib.PurePosixPath(path_template)) + else: + raise RuntimeError("Unknown platform") + + +def get_template_from_path(path: str) -> str: + """Convert a normal path back to its template representation.""" + # replace all path parts with the template tags + path = path.replace("\\", "/") + return path + + +def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str: + """Convert a normal path to template and the convert it back to a normal path with given path type.""" + path_template = get_template_from_path(path) + path = get_path_from_template(path_template, path_type) + return path + + +def set_user_name_override(name: str) -> None: + """Set the global username override value.""" + global _user_name_override + _user_name_override = name + + +def get_user_name(): + """Get the current user name.""" + if _user_name_override is not None: + return _user_name_override + elif platform.system() == "Windows": + return os.getlogin() + elif platform.system() == "Linux": + try: + import pwd # pylint: disable=import-error + return pwd.getpwuid(os.geteuid()).pw_name # pylint: disable=no-member + except: + return "unknown" + else: + raise RuntimeError("Unknown platform") + + +def _create_run_dir_local(submit_config: SubmitConfig) -> str: + """Create a new run dir with increasing ID number at the start.""" + run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) + + if not os.path.exists(run_dir_root): + print("Creating the run dir root: {}".format(run_dir_root)) + os.makedirs(run_dir_root) + + submit_config.run_id = _get_next_run_id_local(run_dir_root) + submit_config.run_name = "{0:05d}-{1}".format(submit_config.run_id, submit_config.run_desc) + run_dir = os.path.join(run_dir_root, submit_config.run_name) + + if os.path.exists(run_dir): + raise RuntimeError("The run dir already exists! ({0})".format(run_dir)) + + print("Creating the run dir: {}".format(run_dir)) + os.makedirs(run_dir) + + return run_dir + + +def _get_next_run_id_local(run_dir_root: str) -> int: + """Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id. 
Assumes IDs are numbers at the start of the directory names.""" + dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] + r = re.compile("^\\d+") # match one or more digits at the start of the string + run_id = 0 + + for dir_name in dir_names: + m = r.match(dir_name) + + if m is not None: + i = int(m.group()) + run_id = max(run_id, i + 1) + + return run_id + + +def _populate_run_dir(run_dir: str, submit_config: SubmitConfig) -> None: + """Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable.""" + print("Copying files to the run dir") + files = [] + + run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) + assert '.' in submit_config.run_func_name + for _idx in range(submit_config.run_func_name.count('.') - 1): + run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) + files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=False) + + dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib") + files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=True) + + if submit_config.run_dir_extra_files is not None: + files += submit_config.run_dir_extra_files + + files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files] + files += [(os.path.join(dnnlib_module_dir_path, "submission", "_internal", "run.py"), os.path.join(run_dir, "run.py"))] + + util.copy_files_and_create_dirs(files) + + pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb")) + + with open(os.path.join(run_dir, "submit_config.txt"), "w") as f: + pprint.pprint(submit_config, stream=f, indent=4, width=200, compact=False) + + +def run_wrapper(submit_config: SubmitConfig) -> None: + """Wrap the actual run function call for handling logging, exceptions, typing, etc.""" + is_local = submit_config.submit_target == SubmitTarget.LOCAL + + checker = None + + # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing + if is_local: + logger = util.Logger(file_name=os.path.join(submit_config.run_dir, "log.txt"), file_mode="w", should_flush=True) + else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh) + logger = util.Logger(file_name=None, should_flush=True) + + import dnnlib + dnnlib.submit_config = submit_config + + try: + print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name)) + start_time = time.time() + util.call_func_by_name(func_name=submit_config.run_func_name, submit_config=submit_config, **submit_config.run_func_kwargs) + print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) + except: + if is_local: + raise + else: + traceback.print_exc() + + log_src = os.path.join(submit_config.run_dir, "log.txt") + log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name)) + shutil.copyfile(log_src, log_dst) + finally: + open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close() + + dnnlib.submit_config = None + logger.close() + + if checker is not None: + checker.stop() + + +def submit_run(submit_config: SubmitConfig, run_func_name: str, **run_func_kwargs) -> None: + """Create a run dir, gather files related to the run, copy files to the 
run dir, and launch the run in appropriate place.""" + submit_config = copy.copy(submit_config) + + if submit_config.user_name is None: + submit_config.user_name = get_user_name() + + submit_config.run_func_name = run_func_name + submit_config.run_func_kwargs = run_func_kwargs + + assert submit_config.submit_target == SubmitTarget.LOCAL + if submit_config.submit_target in {SubmitTarget.LOCAL}: + run_dir = _create_run_dir_local(submit_config) + + submit_config.task_name = "{0}-{1:05d}-{2}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) + submit_config.run_dir = run_dir + _populate_run_dir(run_dir, submit_config) + + if submit_config.print_info: + print("\nSubmit config:\n") + pprint.pprint(submit_config, indent=4, width=200, compact=False) + print() + + if submit_config.ask_confirmation: + if not util.ask_yes_no("Continue submitting the job?"): + return + + run_wrapper(submit_config) diff --git a/models/stylegan_tf_official/dnnlib/tflib/__init__.py b/models/stylegan_tf_official/dnnlib/tflib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f054a39cb81e38ca8b1f4ad5bac168aa68e7d92e --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/tflib/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +from . import autosummary +from . import network +from . import optimizer +from . import tfutil + +from .tfutil import * +from .network import Network + +from .optimizer import Optimizer diff --git a/models/stylegan_tf_official/dnnlib/tflib/autosummary.py b/models/stylegan_tf_official/dnnlib/tflib/autosummary.py new file mode 100644 index 0000000000000000000000000000000000000000..43154f792e5ebe15ee6045a5acdfb279cebefcaa --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/tflib/autosummary.py @@ -0,0 +1,184 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Helper for adding automatically tracked values to Tensorboard. + +Autosummary creates an identity op that internally keeps track of the input +values and automatically shows up in TensorBoard. The reported value +represents an average over input components. The average is accumulated +constantly over time and flushed when save_summaries() is called. + +Notes: +- The output tensor must be used as an input for something else in the + graph. Otherwise, the autosummary op will not get executed, and the average + value will not get accumulated. +- It is perfectly fine to include autosummaries with the same name in + several places throughout the graph, even if they are executed concurrently. +- It is ok to also pass in a python scalar or numpy array. In this case, it + is added to the average immediately. +""" + +from collections import OrderedDict +import numpy as np +import tensorflow as tf +from tensorboard import summary as summary_lib +from tensorboard.plugins.custom_scalar import layout_pb2 + +from . 
import tfutil +from .tfutil import TfExpression +from .tfutil import TfExpressionEx + +_dtype = tf.float64 +_vars = OrderedDict() # name => [var, ...] +_immediate = OrderedDict() # name => update_op, update_value +_finalized = False +_merge_op = None + + +def _create_var(name: str, value_expr: TfExpression) -> TfExpression: + """Internal helper for creating autosummary accumulators.""" + assert not _finalized + name_id = name.replace("/", "_") + v = tf.cast(value_expr, _dtype) + + if v.shape.is_fully_defined(): + size = np.prod(tfutil.shape_to_list(v.shape)) + size_expr = tf.constant(size, dtype=_dtype) + else: + size = None + size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype)) + + if size == 1: + if v.shape.ndims != 0: + v = tf.reshape(v, []) + v = [size_expr, v, tf.square(v)] + else: + v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))] + v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype)) + + with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None): + var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)] + update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v)) + + if name in _vars: + _vars[name].append(var) + else: + _vars[name] = [var] + return update_op + + +def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None) -> TfExpressionEx: + """Create a new autosummary. + + Args: + name: Name to use in TensorBoard + value: TensorFlow expression or python value to track + passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node. + + Example use of the passthru mechanism: + + n = autosummary('l2loss', loss, passthru=n) + + This is a shorthand for the following code: + + with tf.control_dependencies([autosummary('l2loss', loss)]): + n = tf.identity(n) + """ + tfutil.assert_tf_initialized() + name_id = name.replace("/", "_") + + if tfutil.is_tf_expression(value): + with tf.name_scope("summary_" + name_id), tf.device(value.device): + update_op = _create_var(name, value) + with tf.control_dependencies([update_op]): + return tf.identity(value if passthru is None else passthru) + + else: # python scalar or numpy array + if name not in _immediate: + with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None): + update_value = tf.placeholder(_dtype) + update_op = _create_var(name, update_value) + _immediate[name] = update_op, update_value + + update_op, update_value = _immediate[name] + tfutil.run(update_op, {update_value: value}) + return value if passthru is None else passthru + + +def finalize_autosummaries() -> None: + """Create the necessary ops to include autosummaries in TensorBoard report. + Note: This should be done only once per graph. + """ + global _finalized + tfutil.assert_tf_initialized() + + if _finalized: + return None + + _finalized = True + tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list]) + + # Create summary ops. 
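+    # Each accumulator variable created by _create_var() holds the running moments
+    # [count, sum(x), sum(x**2)]. Summing the per-graph accumulators and dividing by
+    # the count gives moments[1] = E[x] and moments[2] = E[x**2], from which the
+    # reported mean and the std = sqrt(E[x**2] - E[x]**2) margins below are derived.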
+ with tf.device(None), tf.control_dependencies(None): + for name, vars_list in _vars.items(): + name_id = name.replace("/", "_") + with tfutil.absolute_name_scope("Autosummary/" + name_id): + moments = tf.add_n(vars_list) + moments /= moments[0] + with tf.control_dependencies([moments]): # read before resetting + reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list] + with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting + mean = moments[1] + std = tf.sqrt(moments[2] - tf.square(moments[1])) + tf.summary.scalar(name, mean) + tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std) + tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std) + + # Group by category and chart name. + cat_dict = OrderedDict() + for series_name in sorted(_vars.keys()): + p = series_name.split("/") + cat = p[0] if len(p) >= 2 else "" + chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1] + if cat not in cat_dict: + cat_dict[cat] = OrderedDict() + if chart not in cat_dict[cat]: + cat_dict[cat][chart] = [] + cat_dict[cat][chart].append(series_name) + + # Setup custom_scalar layout. + categories = [] + for cat_name, chart_dict in cat_dict.items(): + charts = [] + for chart_name, series_names in chart_dict.items(): + series = [] + for series_name in series_names: + series.append(layout_pb2.MarginChartContent.Series( + value=series_name, + lower="xCustomScalars/" + series_name + "/margin_lo", + upper="xCustomScalars/" + series_name + "/margin_hi")) + margin = layout_pb2.MarginChartContent(series=series) + charts.append(layout_pb2.Chart(title=chart_name, margin=margin)) + categories.append(layout_pb2.Category(title=cat_name, chart=charts)) + layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories)) + return layout + +def save_summaries(file_writer, global_step=None): + """Call FileWriter.add_summary() with all summaries in the default graph, + automatically finalizing and merging them on the first call. + """ + global _merge_op + tfutil.assert_tf_initialized() + + if _merge_op is None: + layout = finalize_autosummaries() + if layout is not None: + file_writer.add_summary(layout) + with tf.device(None), tf.control_dependencies(None): + _merge_op = tf.summary.merge_all() + + file_writer.add_summary(_merge_op.eval(), global_step) diff --git a/models/stylegan_tf_official/dnnlib/tflib/network.py b/models/stylegan_tf_official/dnnlib/tflib/network.py new file mode 100644 index 0000000000000000000000000000000000000000..d888a90dd23c1a941b5fb501afec1efcb763b5ea --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/tflib/network.py @@ -0,0 +1,591 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Helper for managing networks.""" + +import types +import inspect +import re +import uuid +import sys +import numpy as np +import tensorflow as tf + +from collections import OrderedDict +from typing import Any, List, Tuple, Union + +from . import tfutil +from .. import util + +from .tfutil import TfExpression, TfExpressionEx + +_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import. +_import_module_src = dict() # Source code for temporary modules created during pickle import. 
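+# Handlers registered via the import_handler decorator below are applied to the
+# pickled state dict in Network.__setstate__ before it is consumed, so legacy
+# pickles can be patched on import. A minimal, hypothetical handler might look
+# like this (names illustrative only):
+#
+#     @import_handler
+#     def _upgrade_legacy_state(state):
+#         state.setdefault("components", {})
+#         return state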
+ + +def import_handler(handler_func): + """Function decorator for declaring custom import handlers.""" + _import_handlers.append(handler_func) + return handler_func + + +class Network: + """Generic network abstraction. + + Acts as a convenience wrapper for a parameterized network construction + function, providing several utility methods and convenient access to + the inputs/outputs/weights. + + Network objects can be safely pickled and unpickled for long-term + archival purposes. The pickling works reliably as long as the underlying + network construction function is defined in a standalone Python module + that has no side effects or application-specific imports. + + Args: + name: Network name. Used to select TensorFlow name and variable scopes. + func_name: Fully qualified name of the underlying network construction function, or a top-level function object. + static_kwargs: Keyword arguments to be passed in to the network construction function. + + Attributes: + name: User-specified name, defaults to build func name if None. + scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name. + static_kwargs: Arguments passed to the user-supplied build func. + components: Container for sub-networks. Passed to the build func, and retained between calls. + num_inputs: Number of input tensors. + num_outputs: Number of output tensors. + input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension. + output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension. + input_shape: Short-hand for input_shapes[0]. + output_shape: Short-hand for output_shapes[0]. + input_templates: Input placeholders in the template graph. + output_templates: Output tensors in the template graph. + input_names: Name string for each input. + output_names: Name string for each output. + own_vars: Variables defined by this network (local_name => var), excluding sub-networks. + vars: All variables (local_name => var). + trainables: All trainable variables (local_name => var). + var_global_to_local: Mapping from variable global names to local names. + """ + + def __init__(self, name: str = None, func_name: Any = None, **static_kwargs): + tfutil.assert_tf_initialized() + assert isinstance(name, str) or name is None + assert func_name is not None + assert isinstance(func_name, str) or util.is_top_level_function(func_name) + assert util.is_pickleable(static_kwargs) + + self._init_fields() + self.name = name + self.static_kwargs = util.EasyDict(static_kwargs) + + # Locate the user-specified network build function. + if util.is_top_level_function(func_name): + func_name = util.get_top_level_function_name(func_name) + module, self._build_func_name = util.get_module_from_obj_name(func_name) + self._build_func = util.get_obj_from_module(module, self._build_func_name) + assert callable(self._build_func) + + # Dig up source code for the module containing the build function. + self._build_module_src = _import_module_src.get(module, None) + if self._build_module_src is None: + self._build_module_src = inspect.getsource(module) + + # Init TensorFlow graph. 
+ self._init_graph() + self.reset_own_vars() + + def _init_fields(self) -> None: + self.name = None + self.scope = None + self.static_kwargs = util.EasyDict() + self.components = util.EasyDict() + self.num_inputs = 0 + self.num_outputs = 0 + self.input_shapes = [[]] + self.output_shapes = [[]] + self.input_shape = [] + self.output_shape = [] + self.input_templates = [] + self.output_templates = [] + self.input_names = [] + self.output_names = [] + self.own_vars = OrderedDict() + self.vars = OrderedDict() + self.trainables = OrderedDict() + self.var_global_to_local = OrderedDict() + + self._build_func = None # User-supplied build function that constructs the network. + self._build_func_name = None # Name of the build function. + self._build_module_src = None # Full source code of the module containing the build function. + self._run_cache = dict() # Cached graph data for Network.run(). + + def _init_graph(self) -> None: + # Collect inputs. + self.input_names = [] + + for param in inspect.signature(self._build_func).parameters.values(): + if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty: + self.input_names.append(param.name) + + self.num_inputs = len(self.input_names) + assert self.num_inputs >= 1 + + # Choose name and scope. + if self.name is None: + self.name = self._build_func_name + assert re.match("^[A-Za-z0-9_.\\-]*$", self.name) + with tf.name_scope(None): + self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True) + + # Finalize build func kwargs. + build_kwargs = dict(self.static_kwargs) + build_kwargs["is_template_graph"] = True + build_kwargs["components"] = self.components + + # Build template graph. + with tfutil.absolute_variable_scope(self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(self.scope): # ignore surrounding scopes + assert tf.get_variable_scope().name == self.scope + assert tf.get_default_graph().get_name_scope() == self.scope + with tf.control_dependencies(None): # ignore surrounding control dependencies + self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names] + out_expr = self._build_func(*self.input_templates, **build_kwargs) + + # Collect outputs. + assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple) + self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr) + self.num_outputs = len(self.output_templates) + assert self.num_outputs >= 1 + assert all(tfutil.is_tf_expression(t) for t in self.output_templates) + + # Perform sanity checks. + if any(t.shape.ndims is None for t in self.input_templates): + raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.") + if any(t.shape.ndims is None for t in self.output_templates): + raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.") + if any(not isinstance(comp, Network) for comp in self.components.values()): + raise ValueError("Components of a Network must be Networks themselves.") + if len(self.components) != len(set(comp.name for comp in self.components.values())): + raise ValueError("Components of a Network must have unique names.") + + # List inputs and outputs. 
+ self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates] + self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates] + self.input_shape = self.input_shapes[0] + self.output_shape = self.output_shapes[0] + self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates] + + # List variables. + self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/")) + self.vars = OrderedDict(self.own_vars) + self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items()) + self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable) + self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items()) + + def reset_own_vars(self) -> None: + """Re-initialize all variables of this network, excluding sub-networks.""" + tfutil.run([var.initializer for var in self.own_vars.values()]) + + def reset_vars(self) -> None: + """Re-initialize all variables of this network, including sub-networks.""" + tfutil.run([var.initializer for var in self.vars.values()]) + + def reset_trainables(self) -> None: + """Re-initialize all trainable variables of this network, including sub-networks.""" + tfutil.run([var.initializer for var in self.trainables.values()]) + + def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]: + """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s).""" + assert len(in_expr) == self.num_inputs + assert not all(expr is None for expr in in_expr) + + # Finalize build func kwargs. + build_kwargs = dict(self.static_kwargs) + build_kwargs.update(dynamic_kwargs) + build_kwargs["is_template_graph"] = False + build_kwargs["components"] = self.components + + # Build TensorFlow graph to evaluate the network. + with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name): + assert tf.get_variable_scope().name == self.scope + valid_inputs = [expr for expr in in_expr if expr is not None] + final_inputs = [] + for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes): + if expr is not None: + expr = tf.identity(expr, name=name) + else: + expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name) + final_inputs.append(expr) + out_expr = self._build_func(*final_inputs, **build_kwargs) + + # Propagate input shapes back to the user-specified expressions. + for expr, final in zip(in_expr, final_inputs): + if isinstance(expr, tf.Tensor): + expr.set_shape(final.shape) + + # Express outputs in the desired format. 
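+        # With return_as_list=False the build function's output is returned as-is
+        # (a single expression, or a tuple when there are multiple outputs);
+        # return_as_list=True always wraps the output(s) in a list.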
+ assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple) + if return_as_list: + out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr) + return out_expr + + def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str: + """Get the local name of a given variable, without any surrounding name scopes.""" + assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str) + global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name + return self.var_global_to_local[global_name] + + def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression: + """Find variable by local or global name.""" + assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str) + return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name + + def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray: + """Get the value of a given variable as NumPy array. + Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible.""" + return self.find_var(var_or_local_name).eval() + + def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None: + """Set the value of a given variable based on the given NumPy array. + Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible.""" + tfutil.set_vars({self.find_var(var_or_local_name): new_value}) + + def __getstate__(self) -> dict: + """Pickle export.""" + state = dict() + state["version"] = 3 + state["name"] = self.name + state["static_kwargs"] = dict(self.static_kwargs) + state["components"] = dict(self.components) + state["build_module_src"] = self._build_module_src + state["build_func_name"] = self._build_func_name + state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values())))) + return state + + def __setstate__(self, state: dict) -> None: + """Pickle import.""" + # pylint: disable=attribute-defined-outside-init + tfutil.assert_tf_initialized() + self._init_fields() + + # Execute custom import handlers. + for handler in _import_handlers: + state = handler(state) + + # Set basic fields. + assert state["version"] in [2, 3] + self.name = state["name"] + self.static_kwargs = util.EasyDict(state["static_kwargs"]) + self.components = util.EasyDict(state.get("components", {})) + self._build_module_src = state["build_module_src"] + self._build_func_name = state["build_func_name"] + + # Create temporary module from the imported source code. + module_name = "_tflib_network_import_" + uuid.uuid4().hex + module = types.ModuleType(module_name) + sys.modules[module_name] = module + _import_module_src[module] = self._build_module_src + exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used + + # Locate network build function in the temporary module. + self._build_func = util.get_obj_from_module(module, self._build_func_name) + assert callable(self._build_func) + + # Init TensorFlow graph. 
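+        # Rebuild the template graph from the recovered build function, then restore
+        # the variable values that were pickled alongside the module source code.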
+ self._init_graph() + self.reset_own_vars() + tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]}) + + def clone(self, name: str = None, **new_static_kwargs) -> "Network": + """Create a clone of this network with its own copy of the variables.""" + # pylint: disable=protected-access + net = object.__new__(Network) + net._init_fields() + net.name = name if name is not None else self.name + net.static_kwargs = util.EasyDict(self.static_kwargs) + net.static_kwargs.update(new_static_kwargs) + net._build_module_src = self._build_module_src + net._build_func_name = self._build_func_name + net._build_func = self._build_func + net._init_graph() + net.copy_vars_from(self) + return net + + def copy_own_vars_from(self, src_net: "Network") -> None: + """Copy the values of all variables from the given network, excluding sub-networks.""" + names = [name for name in self.own_vars.keys() if name in src_net.own_vars] + tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names})) + + def copy_vars_from(self, src_net: "Network") -> None: + """Copy the values of all variables from the given network, including sub-networks.""" + names = [name for name in self.vars.keys() if name in src_net.vars] + tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names})) + + def copy_trainables_from(self, src_net: "Network") -> None: + """Copy the values of all trainable variables from the given network, including sub-networks.""" + names = [name for name in self.trainables.keys() if name in src_net.trainables] + tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names})) + + def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network": + """Create new network with the given parameters, and copy all variables from this network.""" + if new_name is None: + new_name = self.name + static_kwargs = dict(self.static_kwargs) + static_kwargs.update(new_static_kwargs) + net = Network(name=new_name, func_name=new_func_name, **static_kwargs) + net.copy_vars_from(self) + return net + + def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation: + """Construct a TensorFlow op that updates the variables of this network + to be slightly closer to those of the given network.""" + with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"): + ops = [] + for name, var in self.vars.items(): + if name in src_net.vars: + cur_beta = beta if name in self.trainables else beta_nontrainable + new_value = tfutil.lerp(src_net.vars[name], var, cur_beta) + ops.append(var.assign(new_value)) + return tf.group(*ops) + + def run(self, + *in_arrays: Tuple[Union[np.ndarray, None], ...], + input_transform: dict = None, + output_transform: dict = None, + return_as_list: bool = False, + print_progress: bool = False, + minibatch_size: int = None, + num_gpus: int = 1, + assume_frozen: bool = False, + **dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]: + """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s). + + Args: + input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network. + The dict must contain a 'func' field that points to a top-level function. The function is called with the input + TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs. 
+            output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
+                The dict must contain a 'func' field that points to a top-level function. The function is called with the output
+                TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
+            return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
+            print_progress: Print progress to the console? Useful for very large input arrays.
+            minibatch_size: Maximum minibatch size to use, None = disable batching.
+            num_gpus: Number of GPUs to use.
+            assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
+            dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
+        """
+        assert len(in_arrays) == self.num_inputs
+        assert not all(arr is None for arr in in_arrays)
+        assert input_transform is None or util.is_top_level_function(input_transform["func"])
+        assert output_transform is None or util.is_top_level_function(output_transform["func"])
+        output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
+        num_items = in_arrays[0].shape[0]
+        if minibatch_size is None:
+            minibatch_size = num_items
+
+        # Construct unique hash key from all arguments that affect the TensorFlow graph.
+        key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
+        def unwind_key(obj):
+            if isinstance(obj, dict):
+                return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
+            if callable(obj):
+                return util.get_top_level_function_name(obj)
+            return obj
+        key = repr(unwind_key(key))
+
+        # Build graph.
+        if key not in self._run_cache:
+            with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
+                with tf.device("/cpu:0"):
+                    in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
+                    in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
+
+                out_split = []
+                for gpu in range(num_gpus):
+                    with tf.device("/gpu:%d" % gpu):
+                        net_gpu = self.clone() if assume_frozen else self
+                        in_gpu = in_split[gpu]
+
+                        if input_transform is not None:
+                            in_kwargs = dict(input_transform)
+                            in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
+                            in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
+
+                        assert len(in_gpu) == self.num_inputs
+                        out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
+
+                        if output_transform is not None:
+                            out_kwargs = dict(output_transform)
+                            out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
+                            out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
+
+                        assert len(out_gpu) == self.num_outputs
+                        out_split.append(out_gpu)
+
+                with tf.device("/cpu:0"):
+                    out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
+                    self._run_cache[key] = in_expr, out_expr
+
+        # Run minibatches.
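+        # Feed the inputs through the cached graph in slices of at most minibatch_size
+        # items; missing (None) inputs are substituted with zeros, and the results are
+        # written into the preallocated NumPy output arrays.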
+ in_expr, out_expr = self._run_cache[key] + out_arrays = [np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr] + + for mb_begin in range(0, num_items, minibatch_size): + if print_progress: + print("\r%d / %d" % (mb_begin, num_items), end="") + + mb_end = min(mb_begin + minibatch_size, num_items) + mb_num = mb_end - mb_begin + mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)] + mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in))) + + for dst, src in zip(out_arrays, mb_out): + dst[mb_begin: mb_end] = src + + # Done. + if print_progress: + print("\r%d / %d" % (num_items, num_items)) + + if not return_as_list: + out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays) + return out_arrays + + def list_ops(self) -> List[TfExpression]: + include_prefix = self.scope + "/" + exclude_prefix = include_prefix + "_" + ops = tf.get_default_graph().get_operations() + ops = [op for op in ops if op.name.startswith(include_prefix)] + ops = [op for op in ops if not op.name.startswith(exclude_prefix)] + return ops + + def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]: + """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to + individual layers of the network. Mainly intended to be used for reporting.""" + layers = [] + + def recurse(scope, parent_ops, parent_vars, level): + # Ignore specific patterns. + if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]): + return + + # Filter ops and vars by scope. + global_prefix = scope + "/" + local_prefix = global_prefix[len(self.scope) + 1:] + cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]] + cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]] + if not cur_ops and not cur_vars: + return + + # Filter out all ops related to variables. + for var in [op for op in cur_ops if op.type.startswith("Variable")]: + var_prefix = var.name + "/" + cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)] + + # Scope does not contain ops as immediate children => recurse deeper. + contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type != "Identity" for op in cur_ops) + if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1: + visited = set() + for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]: + token = rel_name.split("/")[0] + if token not in visited: + recurse(global_prefix + token, cur_ops, cur_vars, level + 1) + visited.add(token) + return + + # Report layer. 
+ layer_name = scope[len(self.scope) + 1:] + layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1] + layer_trainables = [var for _name, var in cur_vars if var.trainable] + layers.append((layer_name, layer_output, layer_trainables)) + + recurse(self.scope, self.list_ops(), list(self.vars.items()), 0) + return layers + + def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None: + """Print a summary table of the network structure.""" + rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]] + rows += [["---"] * 4] + total_params = 0 + + for layer_name, layer_output, layer_trainables in self.list_layers(): + num_params = sum(np.prod(tfutil.shape_to_list(var.shape)) for var in layer_trainables) + weights = [var for var in layer_trainables if var.name.endswith("/weight:0")] + weights.sort(key=lambda x: len(x.name)) + if len(weights) == 0 and len(layer_trainables) == 1: + weights = layer_trainables + total_params += num_params + + if not hide_layers_with_no_params or num_params != 0: + num_params_str = str(num_params) if num_params > 0 else "-" + output_shape_str = str(layer_output.shape) + weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-" + rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]] + + rows += [["---"] * 4] + rows += [["Total", str(total_params), "", ""]] + + widths = [max(len(cell) for cell in column) for column in zip(*rows)] + print() + for row in rows: + print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths))) + print() + + def setup_weight_histograms(self, title: str = None) -> None: + """Construct summary ops to include histograms of all trainable parameters in TensorBoard.""" + if title is None: + title = self.name + + with tf.name_scope(None), tf.device(None), tf.control_dependencies(None): + for local_name, var in self.trainables.items(): + if "/" in local_name: + p = local_name.split("/") + name = title + "_" + p[-1] + "/" + "_".join(p[:-1]) + else: + name = title + "_toplevel/" + local_name + + tf.summary.histogram(name, var) + +#---------------------------------------------------------------------------- +# Backwards-compatible emulation of legacy output transformation in Network.run(). 
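+# For example, a legacy call such as
+#     net.run(latents, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)
+# is rewritten by the shim below into the equivalent new-style call
+#     net.run(latents, output_transform=dict(func=_legacy_output_transform_func, out_mul=127.5, out_add=127.5, out_dtype=np.uint8))
+# ("latents" is just an illustrative input array; the printed warning recommends
+# switching to output_transform=dict(func=tflib.convert_images_to_uint8) instead).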
+ +_print_legacy_warning = True + +def _handle_legacy_output_transforms(output_transform, dynamic_kwargs): + global _print_legacy_warning + legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"] + if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs): + return output_transform, dynamic_kwargs + + if _print_legacy_warning: + _print_legacy_warning = False + print() + print("WARNING: Old-style output transformations in Network.run() are deprecated.") + print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'") + print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.") + print() + assert output_transform is None + + new_kwargs = dict(dynamic_kwargs) + new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs} + new_transform["func"] = _legacy_output_transform_func + return new_transform, new_kwargs + +def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None): + if out_mul != 1.0: + expr = [x * out_mul for x in expr] + + if out_add != 0.0: + expr = [x + out_add for x in expr] + + if out_shrink > 1: + ksize = [1, 1, out_shrink, out_shrink] + expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr] + + if out_dtype is not None: + if tf.as_dtype(out_dtype).is_integer: + expr = [tf.round(x) for x in expr] + expr = [tf.saturate_cast(x, out_dtype) for x in expr] + return expr diff --git a/models/stylegan_tf_official/dnnlib/tflib/optimizer.py b/models/stylegan_tf_official/dnnlib/tflib/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed88cb236365234597f8734299fbb315c56cc73 --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/tflib/optimizer.py @@ -0,0 +1,214 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Helper wrapper for a Tensorflow optimizer.""" + +import numpy as np +import tensorflow as tf + +from collections import OrderedDict +from typing import List, Union + +from . import autosummary +from . import tfutil +from .. import util + +from .tfutil import TfExpression, TfExpressionEx + +try: + # TensorFlow 1.13 + from tensorflow.python.ops import nccl_ops +except: + # Older TensorFlow versions + import tensorflow.contrib.nccl as nccl_ops + +class Optimizer: + """A Wrapper for tf.train.Optimizer. + + Automatically takes care of: + - Gradient averaging for multi-GPU training. + - Dynamic loss scaling and typecasts for FP16 training. + - Ignoring corrupted gradients that contain NaNs/Infs. + - Reporting statistics. + - Well-chosen default settings. + """ + + def __init__(self, + name: str = "Train", + tf_optimizer: str = "tf.train.AdamOptimizer", + learning_rate: TfExpressionEx = 0.001, + use_loss_scaling: bool = False, + loss_scaling_init: float = 64.0, + loss_scaling_inc: float = 0.0005, + loss_scaling_dec: float = 1.0, + **kwargs): + + # Init fields. 
+ self.name = name + self.learning_rate = tf.convert_to_tensor(learning_rate) + self.id = self.name.replace("/", ".") + self.scope = tf.get_default_graph().unique_name(self.id) + self.optimizer_class = util.get_obj_by_name(tf_optimizer) + self.optimizer_kwargs = dict(kwargs) + self.use_loss_scaling = use_loss_scaling + self.loss_scaling_init = loss_scaling_init + self.loss_scaling_inc = loss_scaling_inc + self.loss_scaling_dec = loss_scaling_dec + self._grad_shapes = None # [shape, ...] + self._dev_opt = OrderedDict() # device => optimizer + self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...] + self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor) + self._updates_applied = False + + def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None: + """Register the gradients of the given loss function with respect to the given variables. + Intended to be called once per GPU.""" + assert not self._updates_applied + + # Validate arguments. + if isinstance(trainable_vars, dict): + trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars + + assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1 + assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss]) + + if self._grad_shapes is None: + self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars] + + assert len(trainable_vars) == len(self._grad_shapes) + assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes)) + + dev = loss.device + + assert all(var.device == dev for var in trainable_vars) + + # Register device and compute gradients. + with tf.name_scope(self.id + "_grad"), tf.device(dev): + if dev not in self._dev_opt: + opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt) + assert callable(self.optimizer_class) + self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) + self._dev_grads[dev] = [] + + loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) + grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage + grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros + self._dev_grads[dev].append(grads) + + def apply_updates(self) -> tf.Operation: + """Construct training op to update the registered variables based on their gradients.""" + tfutil.assert_tf_initialized() + assert not self._updates_applied + self._updates_applied = True + devices = list(self._dev_grads.keys()) + total_grads = sum(len(grads) for grads in self._dev_grads.values()) + assert len(devices) >= 1 and total_grads >= 1 + ops = [] + + with tfutil.absolute_name_scope(self.scope): + # Cast gradients to FP32 and calculate partial sum within each device. + dev_grads = OrderedDict() # device => [(grad, var), ...] + + for dev_idx, dev in enumerate(devices): + with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev): + sums = [] + + for gv in zip(*self._dev_grads[dev]): + assert all(v is gv[0][1] for g, v in gv) + g = [tf.cast(g, tf.float32) for g, v in gv] + g = g[0] if len(g) == 1 else tf.add_n(g) + sums.append((g, gv[0][1])) + + dev_grads[dev] = sums + + # Sum gradients across devices. 
+ if len(devices) > 1: + with tf.name_scope("SumAcrossGPUs"), tf.device(None): + for var_idx, grad_shape in enumerate(self._grad_shapes): + g = [dev_grads[dev][var_idx][0] for dev in devices] + + if np.prod(grad_shape): # nccl does not support zero-sized tensors + g = nccl_ops.all_sum(g) + + for dev, gg in zip(devices, g): + dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1]) + + # Apply updates separately on each device. + for dev_idx, (dev, grads) in enumerate(dev_grads.items()): + with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev): + # Scale gradients as needed. + if self.use_loss_scaling or total_grads > 1: + with tf.name_scope("Scale"): + coef = tf.constant(np.float32(1.0 / total_grads), name="coef") + coef = self.undo_loss_scaling(coef) + grads = [(g * coef, v) for g, v in grads] + + # Check for overflows. + with tf.name_scope("CheckOverflow"): + grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads])) + + # Update weights and adjust loss scaling. + with tf.name_scope("UpdateWeights"): + # pylint: disable=cell-var-from-loop + opt = self._dev_opt[dev] + ls_var = self.get_loss_scaling_var(dev) + + if not self.use_loss_scaling: + ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op)) + else: + ops.append(tf.cond(grad_ok, + lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)), + lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec)))) + + # Report statistics on the last device. + if dev == devices[-1]: + with tf.name_scope("Statistics"): + ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate)) + ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1))) + + if self.use_loss_scaling: + ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var)) + + # Initialize variables and group everything into a single op. 
+ self.reset_optimizer_state() + tfutil.init_uninitialized_vars(list(self._dev_ls_var.values())) + + return tf.group(*ops, name="TrainingOp") + + def reset_optimizer_state(self) -> None: + """Reset internal state of the underlying optimizer.""" + tfutil.assert_tf_initialized() + tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()]) + + def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]: + """Get or create variable representing log2 of the current dynamic loss scaling factor.""" + if not self.use_loss_scaling: + return None + + if device not in self._dev_ls_var: + with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None): + self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var") + + return self._dev_ls_var[device] + + def apply_loss_scaling(self, value: TfExpression) -> TfExpression: + """Apply dynamic loss scaling for the given expression.""" + assert tfutil.is_tf_expression(value) + + if not self.use_loss_scaling: + return value + + return value * tfutil.exp2(self.get_loss_scaling_var(value.device)) + + def undo_loss_scaling(self, value: TfExpression) -> TfExpression: + """Undo the effect of dynamic loss scaling for the given expression.""" + assert tfutil.is_tf_expression(value) + + if not self.use_loss_scaling: + return value + + return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type diff --git a/models/stylegan_tf_official/dnnlib/tflib/tfutil.py b/models/stylegan_tf_official/dnnlib/tflib/tfutil.py new file mode 100644 index 0000000000000000000000000000000000000000..a431a4d4d18a32c9cd44a14ce89f35e038dc312c --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/tflib/tfutil.py @@ -0,0 +1,240 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
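To recap how the Optimizer wrapper above is meant to be driven: gradients are registered against a loss, then apply_updates() builds one training op that sums them, undoes dynamic loss scaling if enabled, and silently skips any step whose gradients contain NaNs/Infs. A minimal single-GPU sketch (build_generator_loss and G are placeholders for whatever loss and network the training loop provides; with several GPUs, register_gradients is called once per device and apply_updates averages the per-device sums via NCCL):

    opt = Optimizer(name='TrainG', learning_rate=0.002, use_loss_scaling=True)
    with tf.device('/gpu:0'):
        G_loss = build_generator_loss(G)        # hypothetical loss, on the same device as G's variables
        opt.register_gradients(G_loss, G.trainables)
    train_op = opt.apply_updates()
    tflib.run(train_op)                         # one step; the update is skipped if any gradient is non-finite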
+ +"""Miscellaneous helper utils for Tensorflow.""" + +import os +import numpy as np +import tensorflow as tf + +from typing import Any, Iterable, List, Union + +TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation] +"""A type that represents a valid Tensorflow expression.""" + +TfExpressionEx = Union[TfExpression, int, float, np.ndarray] +"""A type that can be converted to a valid Tensorflow expression.""" + + +def run(*args, **kwargs) -> Any: + """Run the specified ops in the default session.""" + assert_tf_initialized() + return tf.get_default_session().run(*args, **kwargs) + + +def is_tf_expression(x: Any) -> bool: + """Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation.""" + return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation)) + + +def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]: + """Convert a Tensorflow shape to a list of ints.""" + return [dim.value for dim in shape] + + +def flatten(x: TfExpressionEx) -> TfExpression: + """Shortcut function for flattening a tensor.""" + with tf.name_scope("Flatten"): + return tf.reshape(x, [-1]) + + +def log2(x: TfExpressionEx) -> TfExpression: + """Logarithm in base 2.""" + with tf.name_scope("Log2"): + return tf.log(x) * np.float32(1.0 / np.log(2.0)) + + +def exp2(x: TfExpressionEx) -> TfExpression: + """Exponent in base 2.""" + with tf.name_scope("Exp2"): + return tf.exp(x * np.float32(np.log(2.0))) + + +def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx: + """Linear interpolation.""" + with tf.name_scope("Lerp"): + return a + (b - a) * t + + +def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression: + """Linear interpolation with clip.""" + with tf.name_scope("LerpClip"): + return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) + + +def absolute_name_scope(scope: str) -> tf.name_scope: + """Forcefully enter the specified name scope, ignoring any surrounding scopes.""" + return tf.name_scope(scope + "/") + + +def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope: + """Forcefully enter the specified variable scope, ignoring any surrounding scopes.""" + return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False) + + +def _sanitize_tf_config(config_dict: dict = None) -> dict: + # Defaults. + cfg = dict() + cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is. + cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is. + cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info. + cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used. + cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed. + + # User overrides. + if config_dict is not None: + cfg.update(config_dict) + return cfg + + +def init_tf(config_dict: dict = None) -> None: + """Initialize TensorFlow session using good default settings.""" + # Skip if already initialized. + if tf.get_default_session() is not None: + return + + # Setup config dict and random seeds. 
+ cfg = _sanitize_tf_config(config_dict) + np_random_seed = cfg["rnd.np_random_seed"] + if np_random_seed is not None: + np.random.seed(np_random_seed) + tf_random_seed = cfg["rnd.tf_random_seed"] + if tf_random_seed == "auto": + tf_random_seed = np.random.randint(1 << 31) + if tf_random_seed is not None: + tf.set_random_seed(tf_random_seed) + + # Setup environment variables. + for key, value in list(cfg.items()): + fields = key.split(".") + if fields[0] == "env": + assert len(fields) == 2 + os.environ[fields[1]] = str(value) + + # Create default TensorFlow session. + create_session(cfg, force_as_default=True) + + +def assert_tf_initialized(): + """Check that TensorFlow session has been initialized.""" + if tf.get_default_session() is None: + raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().") + + +def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session: + """Create tf.Session based on config dict.""" + # Setup TensorFlow config proto. + cfg = _sanitize_tf_config(config_dict) + config_proto = tf.ConfigProto() + for key, value in cfg.items(): + fields = key.split(".") + if fields[0] not in ["rnd", "env"]: + obj = config_proto + for field in fields[:-1]: + obj = getattr(obj, field) + setattr(obj, fields[-1], value) + + # Create session. + session = tf.Session(config=config_proto) + if force_as_default: + # pylint: disable=protected-access + session._default_session = session.as_default() + session._default_session.enforce_nesting = False + session._default_session.__enter__() # pylint: disable=no-member + + return session + + +def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None: + """Initialize all tf.Variables that have not already been initialized. + + Equivalent to the following, but more efficient and does not bloat the tf graph: + tf.variables_initializer(tf.report_uninitialized_variables()).run() + """ + assert_tf_initialized() + if target_vars is None: + target_vars = tf.global_variables() + + test_vars = [] + test_ops = [] + + with tf.control_dependencies(None): # ignore surrounding control_dependencies + for var in target_vars: + assert is_tf_expression(var) + + try: + tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0")) + except KeyError: + # Op does not exist => variable may be uninitialized. + test_vars.append(var) + + with absolute_name_scope(var.name.split(":")[0]): + test_ops.append(tf.is_variable_initialized(var)) + + init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited] + run([var.initializer for var in init_vars]) + + +def set_vars(var_to_value_dict: dict) -> None: + """Set the values of given tf.Variables. 
+ + Equivalent to the following, but more efficient and does not bloat the tf graph: + tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()] + """ + assert_tf_initialized() + ops = [] + feed_dict = {} + + for var, value in var_to_value_dict.items(): + assert is_tf_expression(var) + + try: + setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op + except KeyError: + with absolute_name_scope(var.name.split(":")[0]): + with tf.control_dependencies(None): # ignore surrounding control_dependencies + setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter + + ops.append(setter) + feed_dict[setter.op.inputs[1]] = value + + run(ops, feed_dict) + + +def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs): + """Create tf.Variable with large initial value without bloating the tf graph.""" + assert_tf_initialized() + assert isinstance(initial_value, np.ndarray) + zeros = tf.zeros(initial_value.shape, initial_value.dtype) + var = tf.Variable(zeros, *args, **kwargs) + set_vars({var: initial_value}) + return var + + +def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False): + """Convert a minibatch of images from uint8 to float32 with configurable dynamic range. + Can be used as an input transformation for Network.run(). + """ + images = tf.cast(images, tf.float32) + if nhwc_to_nchw: + images = tf.transpose(images, [0, 3, 1, 2]) + return (images - drange[0]) * ((drange[1] - drange[0]) / 255) + + +def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1): + """Convert a minibatch of images from float32 to uint8 with configurable dynamic range. + Can be used as an output transformation for Network.run(). + """ + images = tf.cast(images, tf.float32) + if shrink > 1: + ksize = [1, 1, shrink, shrink] + images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") + if nchw_to_nhwc: + images = tf.transpose(images, [0, 2, 3, 1]) + scale = 255 / (drange[1] - drange[0]) + images = images * scale + (0.5 - drange[0] * scale) + return tf.saturate_cast(images, tf.uint8) diff --git a/models/stylegan_tf_official/dnnlib/util.py b/models/stylegan_tf_official/dnnlib/util.py new file mode 100644 index 0000000000000000000000000000000000000000..133ef764c0707d9384a33f0350ba71b1e624072f --- /dev/null +++ b/models/stylegan_tf_official/dnnlib/util.py @@ -0,0 +1,405 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
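Note that set_vars above routes every assignment through a cached placeholder-backed "setter" op, so repeatedly loading weights does not keep appending new tf.assign nodes to the graph; create_var_with_large_initial_value relies on it to avoid embedding a huge constant. A minimal sketch, assuming an initialized session:

    tflib.init_tf()
    w = tf.Variable(np.zeros([4, 4], np.float32), name='w')
    tflib.run(w.initializer)
    tflib.set_vars({w: np.ones([4, 4], np.float32)})   # later calls reuse the cached 'w/setter:0' op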
+ +"""Miscellaneous utility classes and functions.""" + +import ctypes +import fnmatch +import importlib +import inspect +import numpy as np +import os +import shutil +import sys +import types +import io +import pickle +import re +import requests +import html +import hashlib +import glob +import uuid + +from distutils.util import strtobool +from typing import Any, List, Tuple, Union + + +# Util classes +# ------------------------------------------------------------------------------------------ + + +class EasyDict(dict): + """Convenience class that behaves like a dict but allows access with the attribute syntax.""" + + def __getattr__(self, name: str) -> Any: + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __setattr__(self, name: str, value: Any) -> None: + self[name] = value + + def __delattr__(self, name: str) -> None: + del self[name] + + +class Logger(object): + """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" + + def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): + self.file = None + + if file_name is not None: + self.file = open(file_name, file_mode) + + self.should_flush = should_flush + self.stdout = sys.stdout + self.stderr = sys.stderr + + sys.stdout = self + sys.stderr = self + + def __enter__(self) -> "Logger": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self.close() + + def write(self, text: str) -> None: + """Write text to stdout (and a file) and optionally flush.""" + if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash + return + + if self.file is not None: + self.file.write(text) + + self.stdout.write(text) + + if self.should_flush: + self.flush() + + def flush(self) -> None: + """Flush written text to both stdout and a file, if open.""" + if self.file is not None: + self.file.flush() + + self.stdout.flush() + + def close(self) -> None: + """Flush, close possible files, and remove stdout/stderr mirroring.""" + self.flush() + + # if using multiple loggers, prevent closing in wrong order + if sys.stdout is self: + sys.stdout = self.stdout + if sys.stderr is self: + sys.stderr = self.stderr + + if self.file is not None: + self.file.close() + + +# Small util functions +# ------------------------------------------------------------------------------------------ + + +def format_time(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) + else: + return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) + + +def ask_yes_no(question: str) -> bool: + """Ask the user the question until the user inputs a valid answer.""" + while True: + try: + print("{0} [y/n]".format(question)) + return strtobool(input().lower()) + except ValueError: + pass + + +def tuple_product(t: Tuple) -> Any: + """Calculate the product of the tuple elements.""" + result = 1 + + for v in t: + result *= v + + return result + + +_str_to_ctype = { + "uint8": ctypes.c_ubyte, + "uint16": ctypes.c_uint16, + "uint32": ctypes.c_uint32, + "uint64": ctypes.c_uint64, + "int8": ctypes.c_byte, + "int16": 
ctypes.c_int16, + "int32": ctypes.c_int32, + "int64": ctypes.c_int64, + "float32": ctypes.c_float, + "float64": ctypes.c_double +} + + +def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: + """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" + type_str = None + + if isinstance(type_obj, str): + type_str = type_obj + elif hasattr(type_obj, "__name__"): + type_str = type_obj.__name__ + elif hasattr(type_obj, "name"): + type_str = type_obj.name + else: + raise RuntimeError("Cannot infer type name from input") + + assert type_str in _str_to_ctype.keys() + + my_dtype = np.dtype(type_str) + my_ctype = _str_to_ctype[type_str] + + assert my_dtype.itemsize == ctypes.sizeof(my_ctype) + + return my_dtype, my_ctype + + +def is_pickleable(obj: Any) -> bool: + try: + with io.BytesIO() as stream: + pickle.dump(obj, stream) + return True + except: + return False + + +# Functionality to import modules/objects by name, and call functions by name +# ------------------------------------------------------------------------------------------ + +def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: + """Searches for the underlying module behind the name to some python object. + Returns the module and the object name (original name with module part removed).""" + + # allow convenience shorthands, substitute them by full names + obj_name = re.sub("^np.", "numpy.", obj_name) + obj_name = re.sub("^tf.", "tensorflow.", obj_name) + + # list alternatives for (module_name, local_obj_name) + parts = obj_name.split(".") + name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] + + # try each alternative in turn + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + return module, local_obj_name + except: + pass + + # maybe some of the modules themselves contain errors? + for module_name, _local_obj_name in name_pairs: + try: + importlib.import_module(module_name) # may raise ImportError + except ImportError: + if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): + raise + + # maybe the requested attribute is missing? 
+ for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + except ImportError: + pass + + # we are out of luck, but we have no idea why + raise ImportError(obj_name) + + +def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: + """Traverses the object name and returns the last (rightmost) python object.""" + if obj_name == '': + return module + obj = module + for part in obj_name.split("."): + obj = getattr(obj, part) + return obj + + +def get_obj_by_name(name: str) -> Any: + """Finds the python object with the given name.""" + module, obj_name = get_module_from_obj_name(name) + return get_obj_from_module(module, obj_name) + + +def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: + """Finds the python object with the given name and calls it as a function.""" + assert func_name is not None + func_obj = get_obj_by_name(func_name) + assert callable(func_obj) + return func_obj(*args, **kwargs) + + +def get_module_dir_by_obj_name(obj_name: str) -> str: + """Get the directory path of the module containing the given object name.""" + module, _ = get_module_from_obj_name(obj_name) + return os.path.dirname(inspect.getfile(module)) + + +def is_top_level_function(obj: Any) -> bool: + """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" + return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ + + +def get_top_level_function_name(obj: Any) -> str: + """Return the fully-qualified name of a top-level function.""" + assert is_top_level_function(obj) + return obj.__module__ + "." + obj.__name__ + + +# File system helpers +# ------------------------------------------------------------------------------------------ + +def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: + """List all files recursively in a given directory while ignoring given file and directory names. + Returns list of tuples containing both absolute and relative paths.""" + assert os.path.isdir(dir_path) + base_name = os.path.basename(os.path.normpath(dir_path)) + + if ignores is None: + ignores = [] + + result = [] + + for root, dirs, files in os.walk(dir_path, topdown=True): + for ignore_ in ignores: + dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] + + # dirs need to be edited in-place + for d in dirs_to_remove: + dirs.remove(d) + + files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] + + absolute_paths = [os.path.join(root, f) for f in files] + relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] + + if add_base_to_relative: + relative_paths = [os.path.join(base_name, p) for p in relative_paths] + + assert len(absolute_paths) == len(relative_paths) + result += zip(absolute_paths, relative_paths) + + return result + + +def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: + """Takes in a list of tuples of (src, dst) paths and copies files. 
+ Will create all necessary directories.""" + for file in files: + target_dir_name = os.path.dirname(file[1]) + + # will create all intermediate-level directories + if not os.path.exists(target_dir_name): + os.makedirs(target_dir_name) + + shutil.copyfile(file[0], file[1]) + + +# URL helpers +# ------------------------------------------------------------------------------------------ + +def is_url(obj: Any) -> bool: + """Determine whether the given object is a valid URL string.""" + if not isinstance(obj, str) or not "://" in obj: + return False + try: + res = requests.compat.urlparse(obj) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + except: + return False + return True + + +def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any: + """Download the given URL and return a binary-mode file object to access the data.""" + assert is_url(url) + assert num_attempts >= 1 + + # Lookup from cache. + url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() + if cache_dir is not None: + cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) + if len(cache_files) == 1: + return open(cache_files[0], "rb") + + # Download. + url_name = None + url_data = None + with requests.Session() as session: + if verbose: + print("Downloading %s ..." % url, end="", flush=True) + for attempts_left in reversed(range(num_attempts)): + try: + with session.get(url) as res: + res.raise_for_status() + if len(res.content) == 0: + raise IOError("No data received") + + if len(res.content) < 8192: + content_str = res.content.decode("utf-8") + if "download_warning" in res.headers.get("Set-Cookie", ""): + links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] + if len(links) == 1: + url = requests.compat.urljoin(url, links[0]) + raise IOError("Google Drive virus checker nag") + if "Google Drive - Quota exceeded" in content_str: + raise IOError("Google Drive quota exceeded") + + match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) + url_name = match[1] if match else url + url_data = res.content + if verbose: + print(" done") + break + except: + if not attempts_left: + if verbose: + print(" failed") + raise + if verbose: + print(".", end="", flush=True) + + # Save to cache. + if cache_dir is not None: + safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) + cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) + temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) + os.makedirs(cache_dir, exist_ok=True) + with open(temp_file, "wb") as f: + f.write(url_data) + os.replace(temp_file, cache_file) # atomic + + # Return data as file object. + return io.BytesIO(url_data) diff --git a/models/stylegan_tf_official/generate_figures.py b/models/stylegan_tf_official/generate_figures.py new file mode 100644 index 0000000000000000000000000000000000000000..45b68b86146198c701a66fb8ba7a363d901d6951 --- /dev/null +++ b/models/stylegan_tf_official/generate_figures.py @@ -0,0 +1,161 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. 
To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Minimal script for reproducing the figures of the StyleGAN paper using pre-trained generators.""" + +import os +import pickle +import numpy as np +import PIL.Image +import dnnlib +import dnnlib.tflib as tflib +import config + +#---------------------------------------------------------------------------- +# Helpers for loading and using pre-trained generators. + +url_ffhq = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl +url_celebahq = 'https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf' # karras2019stylegan-celebahq-1024x1024.pkl +url_bedrooms = 'https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF' # karras2019stylegan-bedrooms-256x256.pkl +url_cars = 'https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3' # karras2019stylegan-cars-512x384.pkl +url_cats = 'https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ' # karras2019stylegan-cats-256x256.pkl + +synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8) + +_Gs_cache = dict() + +def load_Gs(url): + if url not in _Gs_cache: + with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: + _G, _D, Gs = pickle.load(f) + _Gs_cache[url] = Gs + return _Gs_cache[url] + +#---------------------------------------------------------------------------- +# Figures 2, 3, 10, 11, 12: Multi-resolution grid of uncurated result images. + +def draw_uncurated_result_figure(png, Gs, cx, cy, cw, ch, rows, lods, seed): + print(png) + latents = np.random.RandomState(seed).randn(sum(rows * 2**lod for lod in lods), Gs.input_shape[1]) + images = Gs.run(latents, None, **synthesis_kwargs) # [seed, y, x, rgb] + + canvas = PIL.Image.new('RGB', (sum(cw // 2**lod for lod in lods), ch * rows), 'white') + image_iter = iter(list(images)) + for col, lod in enumerate(lods): + for row in range(rows * 2**lod): + image = PIL.Image.fromarray(next(image_iter), 'RGB') + image = image.crop((cx, cy, cx + cw, cy + ch)) + image = image.resize((cw // 2**lod, ch // 2**lod), PIL.Image.ANTIALIAS) + canvas.paste(image, (sum(cw // 2**lod for lod in lods[:col]), row * ch // 2**lod)) + canvas.save(png) + +#---------------------------------------------------------------------------- +# Figure 3: Style mixing. 
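The style-mixing figure below works entirely in dlatent (W) space: each destination row keeps its own dlatents except at the layer indices listed in style_ranges, which are overwritten with the source column's dlatents before synthesis. The core of it is just an indexed copy, roughly:

    row_dlatents = np.stack([dst_dlatents[row]] * len(src_seeds))            # repeat the destination row
    row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]]  # swap in the chosen layers

In main() further down, style_ranges=[range(0,4)]*3 + [range(4,8)]*2 + [range(8,18)] mixes only the coarse styles for the first three rows, the middle styles for the next two, and the fine styles for the last.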
+ +def draw_style_mixing_figure(png, Gs, w, h, src_seeds, dst_seeds, style_ranges): + print(png) + src_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds) + dst_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in dst_seeds) + src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component] + dst_dlatents = Gs.components.mapping.run(dst_latents, None) # [seed, layer, component] + src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs) + dst_images = Gs.components.synthesis.run(dst_dlatents, randomize_noise=False, **synthesis_kwargs) + + canvas = PIL.Image.new('RGB', (w * (len(src_seeds) + 1), h * (len(dst_seeds) + 1)), 'white') + for col, src_image in enumerate(list(src_images)): + canvas.paste(PIL.Image.fromarray(src_image, 'RGB'), ((col + 1) * w, 0)) + for row, dst_image in enumerate(list(dst_images)): + canvas.paste(PIL.Image.fromarray(dst_image, 'RGB'), (0, (row + 1) * h)) + row_dlatents = np.stack([dst_dlatents[row]] * len(src_seeds)) + row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]] + row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) + for col, image in enumerate(list(row_images)): + canvas.paste(PIL.Image.fromarray(image, 'RGB'), ((col + 1) * w, (row + 1) * h)) + canvas.save(png) + +#---------------------------------------------------------------------------- +# Figure 4: Noise detail. + +def draw_noise_detail_figure(png, Gs, w, h, num_samples, seeds): + print(png) + canvas = PIL.Image.new('RGB', (w * 3, h * len(seeds)), 'white') + for row, seed in enumerate(seeds): + latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1])] * num_samples) + images = Gs.run(latents, None, truncation_psi=1, **synthesis_kwargs) + canvas.paste(PIL.Image.fromarray(images[0], 'RGB'), (0, row * h)) + for i in range(4): + crop = PIL.Image.fromarray(images[i + 1], 'RGB') + crop = crop.crop((650, 180, 906, 436)) + crop = crop.resize((w//2, h//2), PIL.Image.NEAREST) + canvas.paste(crop, (w + (i%2) * w//2, row * h + (i//2) * h//2)) + diff = np.std(np.mean(images, axis=3), axis=0) * 4 + diff = np.clip(diff + 0.5, 0, 255).astype(np.uint8) + canvas.paste(PIL.Image.fromarray(diff, 'L'), (w * 2, row * h)) + canvas.save(png) + +#---------------------------------------------------------------------------- +# Figure 5: Noise components. + +def draw_noise_components_figure(png, Gs, w, h, seeds, noise_ranges, flips): + print(png) + Gsc = Gs.clone() + noise_vars = [var for name, var in Gsc.components.synthesis.vars.items() if name.startswith('noise')] + noise_pairs = list(zip(noise_vars, tflib.run(noise_vars))) # [(var, val), ...] 
+ latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) + all_images = [] + for noise_range in noise_ranges: + tflib.set_vars({var: val * (1 if i in noise_range else 0) for i, (var, val) in enumerate(noise_pairs)}) + range_images = Gsc.run(latents, None, truncation_psi=1, randomize_noise=False, **synthesis_kwargs) + range_images[flips, :, :] = range_images[flips, :, ::-1] + all_images.append(list(range_images)) + + canvas = PIL.Image.new('RGB', (w * 2, h * 2), 'white') + for col, col_images in enumerate(zip(*all_images)): + canvas.paste(PIL.Image.fromarray(col_images[0], 'RGB').crop((0, 0, w//2, h)), (col * w, 0)) + canvas.paste(PIL.Image.fromarray(col_images[1], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, 0)) + canvas.paste(PIL.Image.fromarray(col_images[2], 'RGB').crop((0, 0, w//2, h)), (col * w, h)) + canvas.paste(PIL.Image.fromarray(col_images[3], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, h)) + canvas.save(png) + +#---------------------------------------------------------------------------- +# Figure 8: Truncation trick. + +def draw_truncation_trick_figure(png, Gs, w, h, seeds, psis): + print(png) + latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) + dlatents = Gs.components.mapping.run(latents, None) # [seed, layer, component] + dlatent_avg = Gs.get_var('dlatent_avg') # [component] + + canvas = PIL.Image.new('RGB', (w * len(psis), h * len(seeds)), 'white') + for row, dlatent in enumerate(list(dlatents)): + row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(psis, [-1, 1, 1]) + dlatent_avg + row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) + for col, image in enumerate(list(row_images)): + canvas.paste(PIL.Image.fromarray(image, 'RGB'), (col * w, row * h)) + canvas.save(png) + +#---------------------------------------------------------------------------- +# Main program. 
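The truncation-trick figure above is a direct application of the interpolation w' = w_avg + psi * (w - w_avg): psi = 1 reproduces the original dlatents, psi = 0 collapses every column to the average face, and negative psi flips each latent to the opposite side of the average. In NumPy terms it is the single line used above:

    w_trunc = dlatent_avg + psi * (dlatent - dlatent_avg)   # same as the row_dlatents expression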
+ +def main(): + tflib.init_tf() + os.makedirs(config.result_dir, exist_ok=True) + draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure02-uncurated-ffhq.png'), load_Gs(url_ffhq), cx=0, cy=0, cw=1024, ch=1024, rows=3, lods=[0,1,2,2,3,3], seed=5) + draw_style_mixing_figure(os.path.join(config.result_dir, 'figure03-style-mixing.png'), load_Gs(url_ffhq), w=1024, h=1024, src_seeds=[639,701,687,615,2268], dst_seeds=[888,829,1898,1733,1614,845], style_ranges=[range(0,4)]*3+[range(4,8)]*2+[range(8,18)]) + draw_noise_detail_figure(os.path.join(config.result_dir, 'figure04-noise-detail.png'), load_Gs(url_ffhq), w=1024, h=1024, num_samples=100, seeds=[1157,1012]) + draw_noise_components_figure(os.path.join(config.result_dir, 'figure05-noise-components.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[1967,1555], noise_ranges=[range(0, 18), range(0, 0), range(8, 18), range(0, 8)], flips=[1]) + draw_truncation_trick_figure(os.path.join(config.result_dir, 'figure08-truncation-trick.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[91,388], psis=[1, 0.7, 0.5, 0, -0.5, -1]) + draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure10-uncurated-bedrooms.png'), load_Gs(url_bedrooms), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=0) + draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure11-uncurated-cars.png'), load_Gs(url_cars), cx=0, cy=64, cw=512, ch=384, rows=4, lods=[0,1,2,2,3,3], seed=2) + draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure12-uncurated-cats.png'), load_Gs(url_cats), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=1) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + main() + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/metrics/__init__.py b/models/stylegan_tf_official/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db8124b132f91216c0ded226f20ea3a046734728 --- /dev/null +++ b/models/stylegan_tf_official/metrics/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +# empty diff --git a/models/stylegan_tf_official/metrics/frechet_inception_distance.py b/models/stylegan_tf_official/metrics/frechet_inception_distance.py new file mode 100644 index 0000000000000000000000000000000000000000..41f71fe4bfb85218cc283b3f7bc3a34fea5f790d --- /dev/null +++ b/models/stylegan_tf_official/metrics/frechet_inception_distance.py @@ -0,0 +1,72 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
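The FID metric implemented below compares Inception-v3 feature statistics of real and generated images: with means mu_r, mu_f and covariances sigma_r, sigma_f, FID = ||mu_r - mu_f||^2 + Tr(sigma_r + sigma_f - 2*sqrtm(sigma_r @ sigma_f)). A small NumPy sketch of that final step, mirroring how the class below combines the cached real statistics with the fake ones:

    import numpy as np
    import scipy.linalg

    def fid_from_stats(mu_real, sigma_real, mu_fake, sigma_fake):
        m = np.square(mu_fake - mu_real).sum()
        s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False)
        return np.real(m + np.trace(sigma_fake + sigma_real - 2 * s))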
+ +"""Frechet Inception Distance (FID).""" + +import os +import numpy as np +import scipy +import tensorflow as tf +import dnnlib.tflib as tflib + +from metrics import metric_base +from training import misc + +#---------------------------------------------------------------------------- + +class FID(metric_base.MetricBase): + def __init__(self, num_images, minibatch_per_gpu, **kwargs): + super().__init__(**kwargs) + self.num_images = num_images + self.minibatch_per_gpu = minibatch_per_gpu + + def _evaluate(self, Gs, num_gpus): + minibatch_size = num_gpus * self.minibatch_per_gpu + inception = misc.load_pkl('https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn') # inception_v3_features.pkl + activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32) + + # Calculate statistics for reals. + cache_file = self._get_cache_file_for_reals(num_images=self.num_images) + os.makedirs(os.path.dirname(cache_file), exist_ok=True) + if os.path.isfile(cache_file): + mu_real, sigma_real = misc.load_pkl(cache_file) + else: + for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)): + begin = idx * minibatch_size + end = min(begin + minibatch_size, self.num_images) + activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True) + if end == self.num_images: + break + mu_real = np.mean(activations, axis=0) + sigma_real = np.cov(activations, rowvar=False) + misc.save_pkl((mu_real, sigma_real), cache_file) + + # Construct TensorFlow graph. + result_expr = [] + for gpu_idx in range(num_gpus): + with tf.device('/gpu:%d' % gpu_idx): + Gs_clone = Gs.clone() + inception_clone = inception.clone() + latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:]) + images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True) + images = tflib.convert_images_to_uint8(images) + result_expr.append(inception_clone.get_output_for(images)) + + # Calculate statistics for fakes. + for begin in range(0, self.num_images, minibatch_size): + end = min(begin + minibatch_size, self.num_images) + activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin] + mu_fake = np.mean(activations, axis=0) + sigma_fake = np.cov(activations, rowvar=False) + + # Calculate FID. + m = np.square(mu_fake - mu_real).sum() + s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member + dist = m + np.trace(sigma_fake + sigma_real - 2*s) + self._report_result(np.real(dist)) + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/metrics/linear_separability.py b/models/stylegan_tf_official/metrics/linear_separability.py new file mode 100644 index 0000000000000000000000000000000000000000..e50be5a0fea00eba7af2d05cccf74bacedbea1c3 --- /dev/null +++ b/models/stylegan_tf_official/metrics/linear_separability.py @@ -0,0 +1,177 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
+ +"""Linear Separability (LS).""" + +from collections import defaultdict +import numpy as np +import sklearn.svm +import tensorflow as tf +import dnnlib.tflib as tflib + +from metrics import metric_base +from training import misc + +#---------------------------------------------------------------------------- + +classifier_urls = [ + 'https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX', # celebahq-classifier-00-male.pkl + 'https://drive.google.com/uc?id=1Q5c6HE__ReW2W8qYAXpao68V1ryuisGo', # celebahq-classifier-01-smiling.pkl + 'https://drive.google.com/uc?id=1Q7738mgWTljPOJQrZtSMLxzShEhrvVsU', # celebahq-classifier-02-attractive.pkl + 'https://drive.google.com/uc?id=1QBv2Mxe7ZLvOv1YBTLq-T4DS3HjmXV0o', # celebahq-classifier-03-wavy-hair.pkl + 'https://drive.google.com/uc?id=1QIvKTrkYpUrdA45nf7pspwAqXDwWOLhV', # celebahq-classifier-04-young.pkl + 'https://drive.google.com/uc?id=1QJPH5rW7MbIjFUdZT7vRYfyUjNYDl4_L', # celebahq-classifier-05-5-o-clock-shadow.pkl + 'https://drive.google.com/uc?id=1QPZXSYf6cptQnApWS_T83sqFMun3rULY', # celebahq-classifier-06-arched-eyebrows.pkl + 'https://drive.google.com/uc?id=1QPgoAZRqINXk_PFoQ6NwMmiJfxc5d2Pg', # celebahq-classifier-07-bags-under-eyes.pkl + 'https://drive.google.com/uc?id=1QQPQgxgI6wrMWNyxFyTLSgMVZmRr1oO7', # celebahq-classifier-08-bald.pkl + 'https://drive.google.com/uc?id=1QcSphAmV62UrCIqhMGgcIlZfoe8hfWaF', # celebahq-classifier-09-bangs.pkl + 'https://drive.google.com/uc?id=1QdWTVwljClTFrrrcZnPuPOR4mEuz7jGh', # celebahq-classifier-10-big-lips.pkl + 'https://drive.google.com/uc?id=1QgvEWEtr2mS4yj1b_Y3WKe6cLWL3LYmK', # celebahq-classifier-11-big-nose.pkl + 'https://drive.google.com/uc?id=1QidfMk9FOKgmUUIziTCeo8t-kTGwcT18', # celebahq-classifier-12-black-hair.pkl + 'https://drive.google.com/uc?id=1QthrJt-wY31GPtV8SbnZQZ0_UEdhasHO', # celebahq-classifier-13-blond-hair.pkl + 'https://drive.google.com/uc?id=1QvCAkXxdYT4sIwCzYDnCL9Nb5TDYUxGW', # celebahq-classifier-14-blurry.pkl + 'https://drive.google.com/uc?id=1QvLWuwSuWI9Ln8cpxSGHIciUsnmaw8L0', # celebahq-classifier-15-brown-hair.pkl + 'https://drive.google.com/uc?id=1QxW6THPI2fqDoiFEMaV6pWWHhKI_OoA7', # celebahq-classifier-16-bushy-eyebrows.pkl + 'https://drive.google.com/uc?id=1R71xKw8oTW2IHyqmRDChhTBkW9wq4N9v', # celebahq-classifier-17-chubby.pkl + 'https://drive.google.com/uc?id=1RDn_fiLfEGbTc7JjazRXuAxJpr-4Pl67', # celebahq-classifier-18-double-chin.pkl + 'https://drive.google.com/uc?id=1RGBuwXbaz5052bM4VFvaSJaqNvVM4_cI', # celebahq-classifier-19-eyeglasses.pkl + 'https://drive.google.com/uc?id=1RIxOiWxDpUwhB-9HzDkbkLegkd7euRU9', # celebahq-classifier-20-goatee.pkl + 'https://drive.google.com/uc?id=1RPaNiEnJODdr-fwXhUFdoSQLFFZC7rC-', # celebahq-classifier-21-gray-hair.pkl + 'https://drive.google.com/uc?id=1RQH8lPSwOI2K_9XQCZ2Ktz7xm46o80ep', # celebahq-classifier-22-heavy-makeup.pkl + 'https://drive.google.com/uc?id=1RXZM61xCzlwUZKq-X7QhxOg0D2telPow', # celebahq-classifier-23-high-cheekbones.pkl + 'https://drive.google.com/uc?id=1RgASVHW8EWMyOCiRb5fsUijFu-HfxONM', # celebahq-classifier-24-mouth-slightly-open.pkl + 'https://drive.google.com/uc?id=1RkC8JLqLosWMaRne3DARRgolhbtg_wnr', # celebahq-classifier-25-mustache.pkl + 'https://drive.google.com/uc?id=1RqtbtFT2EuwpGTqsTYJDyXdnDsFCPtLO', # celebahq-classifier-26-narrow-eyes.pkl + 'https://drive.google.com/uc?id=1Rs7hU-re8bBMeRHR-fKgMbjPh-RIbrsh', # celebahq-classifier-27-no-beard.pkl + 'https://drive.google.com/uc?id=1RynDJQWdGOAGffmkPVCrLJqy_fciPF9E', # celebahq-classifier-28-oval-face.pkl + 
'https://drive.google.com/uc?id=1S0TZ_Hdv5cb06NDaCD8NqVfKy7MuXZsN', # celebahq-classifier-29-pale-skin.pkl + 'https://drive.google.com/uc?id=1S3JPhZH2B4gVZZYCWkxoRP11q09PjCkA', # celebahq-classifier-30-pointy-nose.pkl + 'https://drive.google.com/uc?id=1S3pQuUz-Jiywq_euhsfezWfGkfzLZ87W', # celebahq-classifier-31-receding-hairline.pkl + 'https://drive.google.com/uc?id=1S6nyIl_SEI3M4l748xEdTV2vymB_-lrY', # celebahq-classifier-32-rosy-cheeks.pkl + 'https://drive.google.com/uc?id=1S9P5WCi3GYIBPVYiPTWygrYIUSIKGxbU', # celebahq-classifier-33-sideburns.pkl + 'https://drive.google.com/uc?id=1SANviG-pp08n7AFpE9wrARzozPIlbfCH', # celebahq-classifier-34-straight-hair.pkl + 'https://drive.google.com/uc?id=1SArgyMl6_z7P7coAuArqUC2zbmckecEY', # celebahq-classifier-35-wearing-earrings.pkl + 'https://drive.google.com/uc?id=1SC5JjS5J-J4zXFO9Vk2ZU2DT82TZUza_', # celebahq-classifier-36-wearing-hat.pkl + 'https://drive.google.com/uc?id=1SDAQWz03HGiu0MSOKyn7gvrp3wdIGoj-', # celebahq-classifier-37-wearing-lipstick.pkl + 'https://drive.google.com/uc?id=1SEtrVK-TQUC0XeGkBE9y7L8VXfbchyKX', # celebahq-classifier-38-wearing-necklace.pkl + 'https://drive.google.com/uc?id=1SF_mJIdyGINXoV-I6IAxHB_k5dxiF6M-', # celebahq-classifier-39-wearing-necktie.pkl +] + +#---------------------------------------------------------------------------- + +def prob_normalize(p): + p = np.asarray(p).astype(np.float32) + assert len(p.shape) == 2 + return p / np.sum(p) + +def mutual_information(p): + p = prob_normalize(p) + px = np.sum(p, axis=1) + py = np.sum(p, axis=0) + result = 0.0 + for x in range(p.shape[0]): + p_x = px[x] + for y in range(p.shape[1]): + p_xy = p[x][y] + p_y = py[y] + if p_xy > 0.0: + result += p_xy * np.log2(p_xy / (p_x * p_y)) # get bits as output + return result + +def entropy(p): + p = prob_normalize(p) + result = 0.0 + for x in range(p.shape[0]): + for y in range(p.shape[1]): + p_xy = p[x][y] + if p_xy > 0.0: + result -= p_xy * np.log2(p_xy) + return result + +def conditional_entropy(p): + # H(Y|X) where X corresponds to axis 0, Y to axis 1 + # i.e., How many bits of additional information are needed to where we are on axis 1 if we know where we are on axis 0? + p = prob_normalize(p) + y = np.sum(p, axis=0, keepdims=True) # marginalize to calculate H(Y) + return max(0.0, entropy(y) - mutual_information(p)) # can slip just below 0 due to FP inaccuracies, clean those up. + +#---------------------------------------------------------------------------- + +class LS(metric_base.MetricBase): + def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs): + assert num_keep <= num_samples + super().__init__(**kwargs) + self.num_samples = num_samples + self.num_keep = num_keep + self.attrib_indices = attrib_indices + self.minibatch_per_gpu = minibatch_per_gpu + + def _evaluate(self, Gs, num_gpus): + minibatch_size = num_gpus * self.minibatch_per_gpu + + # Construct TensorFlow graph for each GPU. + result_expr = [] + for gpu_idx in range(num_gpus): + with tf.device('/gpu:%d' % gpu_idx): + Gs_clone = Gs.clone() + + # Generate images. + latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:]) + dlatents = Gs_clone.components.mapping.get_output_for(latents, None, is_validation=True) + images = Gs_clone.components.synthesis.get_output_for(dlatents, is_validation=True, randomize_noise=True) + + # Downsample to 256x256. The attribute classifiers were built for 256x256. 
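# Note: the reshape/reduce_mean pattern below splits each factor-by-factor block of
# pixels into its own pair of axes and averages over them, which is equivalent to
# average pooling with a factor x factor kernel, written without an explicit pooling op.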
+ if images.shape[2] > 256: + factor = images.shape[2] // 256 + images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) + images = tf.reduce_mean(images, axis=[3, 5]) + + # Run classifier for each attribute. + result_dict = dict(latents=latents, dlatents=dlatents[:,-1]) + for attrib_idx in self.attrib_indices: + classifier = misc.load_pkl(classifier_urls[attrib_idx]) + logits = classifier.get_output_for(images, None) + predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1)) + result_dict[attrib_idx] = predictions + result_expr.append(result_dict) + + # Sampling loop. + results = [] + for _ in range(0, self.num_samples, minibatch_size): + results += tflib.run(result_expr) + results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()} + + # Calculate conditional entropy for each attribute. + conditional_entropies = defaultdict(list) + for attrib_idx in self.attrib_indices: + # Prune the least confident samples. + pruned_indices = list(range(self.num_samples)) + pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i])) + pruned_indices = pruned_indices[:self.num_keep] + + # Fit SVM to the remaining samples. + svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1) + for space in ['latents', 'dlatents']: + svm_inputs = results[space][pruned_indices] + try: + svm = sklearn.svm.LinearSVC() + svm.fit(svm_inputs, svm_targets) + svm.score(svm_inputs, svm_targets) + svm_outputs = svm.predict(svm_inputs) + except: + svm_outputs = svm_targets # assume perfect prediction + + # Calculate conditional entropy. + p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)] + conditional_entropies[space].append(conditional_entropy(p)) + + # Calculate separability scores. + scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()} + self._report_result(scores['latents'], suffix='_z') + self._report_result(scores['dlatents'], suffix='_w') + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/metrics/metric_base.py b/models/stylegan_tf_official/metrics/metric_base.py new file mode 100644 index 0000000000000000000000000000000000000000..0db82adecb60260393eaf82bd991575d79085787 --- /dev/null +++ b/models/stylegan_tf_official/metrics/metric_base.py @@ -0,0 +1,142 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Common definitions for GAN metrics.""" + +import os +import time +import hashlib +import numpy as np +import tensorflow as tf +import dnnlib +import dnnlib.tflib as tflib + +import config +from training import misc +from training import dataset + +#---------------------------------------------------------------------------- +# Standard metrics. 
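The EasyDict presets below are not metric objects themselves; they are keyword bundles that dnnlib.util.call_func_by_name expands into instances of the classes named by func_name. A hedged sketch of how they are typically consumed (network_pkl and run_dir are assumed to point at an existing network pickle and a training run directory whose config can be parsed for the real-image dataset):

    group = metric_base.MetricGroup([metric_base.fid50k, metric_base.ppl_wend])
    group.run(network_pkl, run_dir=run_dir, num_gpus=1)
    print(group.get_result_str())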
+ +fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8) +ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16) +ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16) +ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16) +ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16) +ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4) +dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging + +#---------------------------------------------------------------------------- +# Base class for metrics. + +class MetricBase: + def __init__(self, name): + self.name = name + self._network_pkl = None + self._dataset_args = None + self._mirror_augment = None + self._results = [] + self._eval_time = None + + def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True): + self._network_pkl = network_pkl + self._dataset_args = dataset_args + self._mirror_augment = mirror_augment + self._results = [] + + if (dataset_args is None or mirror_augment is None) and run_dir is not None: + run_config = misc.parse_config_for_previous_run(run_dir) + self._dataset_args = dict(run_config['dataset']) + self._dataset_args['shuffle_mb'] = 0 + self._mirror_augment = run_config['train'].get('mirror_augment', False) + + time_begin = time.time() + with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager + _G, _D, Gs = misc.load_pkl(self._network_pkl) + self._evaluate(Gs, num_gpus=num_gpus) + self._eval_time = time.time() - time_begin + + if log_results: + result_str = self.get_result_str() + if run_dir is not None: + log = os.path.join(run_dir, 'metric-%s.txt' % self.name) + with dnnlib.util.Logger(log, 'a'): + print(result_str) + else: + print(result_str) + + def get_result_str(self): + network_name = os.path.splitext(os.path.basename(self._network_pkl))[0] + if len(network_name) > 29: + network_name = '...' 
+ network_name[-26:] + result_str = '%-30s' % network_name + result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time) + for res in self._results: + result_str += ' ' + self.name + res.suffix + ' ' + result_str += res.fmt % res.value + return result_str + + def update_autosummaries(self): + for res in self._results: + tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value) + + def _evaluate(self, Gs, num_gpus): + raise NotImplementedError # to be overridden by subclasses + + def _report_result(self, value, suffix='', fmt='%-10.4f'): + self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)] + + def _get_cache_file_for_reals(self, extension='pkl', **kwargs): + all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment) + all_args.update(self._dataset_args) + all_args.update(kwargs) + md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8')) + dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1] + return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension)) + + def _iterate_reals(self, minibatch_size): + dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args) + while True: + images, _labels = dataset_obj.get_minibatch_np(minibatch_size) + if self._mirror_augment: + images = misc.apply_mirror_augment(images) + yield images + + def _iterate_fakes(self, Gs, minibatch_size, num_gpus): + while True: + latents = np.random.randn(minibatch_size, *Gs.input_shape[1:]) + fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) + images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True) + yield images + +#---------------------------------------------------------------------------- +# Group of multiple metrics. + +class MetricGroup: + def __init__(self, metric_kwarg_list): + self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list] + + def run(self, *args, **kwargs): + for metric in self.metrics: + metric.run(*args, **kwargs) + + def get_result_str(self): + return ' '.join(metric.get_result_str() for metric in self.metrics) + + def update_autosummaries(self): + for metric in self.metrics: + metric.update_autosummaries() + +#---------------------------------------------------------------------------- +# Dummy metric for debugging purposes. + +class DummyMetric(MetricBase): + def _evaluate(self, Gs, num_gpus): + _ = Gs, num_gpus + self._report_result(0.0) + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/metrics/perceptual_path_length.py b/models/stylegan_tf_official/metrics/perceptual_path_length.py new file mode 100644 index 0000000000000000000000000000000000000000..17271cfdf1545a26ab71d309ce2180532f513bd6 --- /dev/null +++ b/models/stylegan_tf_official/metrics/perceptual_path_length.py @@ -0,0 +1,108 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
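Perceptual path length, implemented below, measures how much the generated image changes in VGG feature space for a small step along an interpolation path between two latents, scaled to a per-unit rate:

    PPL = E[ (1 / epsilon**2) * d( G(interp(z1, z2; t)), G(interp(z1, z2; t + epsilon)) ) ]

where interp is slerp when sampling in Z and lerp when sampling in W, t is drawn uniformly from [0, 1] for the 'full' setting and fixed to 0 for 'end', and d is the VGG16 perceptual distance loaded further down.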
+ +"""Perceptual Path Length (PPL).""" + +import numpy as np +import tensorflow as tf +import dnnlib.tflib as tflib + +from metrics import metric_base +from training import misc + +#---------------------------------------------------------------------------- + +# Normalize batch of vectors. +def normalize(v): + return v / tf.sqrt(tf.reduce_sum(tf.square(v), axis=-1, keepdims=True)) + +# Spherical interpolation of a batch of vectors. +def slerp(a, b, t): + a = normalize(a) + b = normalize(b) + d = tf.reduce_sum(a * b, axis=-1, keepdims=True) + p = t * tf.math.acos(d) + c = normalize(b - d * a) + d = a * tf.math.cos(p) + c * tf.math.sin(p) + return normalize(d) + +#---------------------------------------------------------------------------- + +class PPL(metric_base.MetricBase): + def __init__(self, num_samples, epsilon, space, sampling, minibatch_per_gpu, **kwargs): + assert space in ['z', 'w'] + assert sampling in ['full', 'end'] + super().__init__(**kwargs) + self.num_samples = num_samples + self.epsilon = epsilon + self.space = space + self.sampling = sampling + self.minibatch_per_gpu = minibatch_per_gpu + + def _evaluate(self, Gs, num_gpus): + minibatch_size = num_gpus * self.minibatch_per_gpu + + # Construct TensorFlow graph. + distance_expr = [] + for gpu_idx in range(num_gpus): + with tf.device('/gpu:%d' % gpu_idx): + Gs_clone = Gs.clone() + noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')] + + # Generate random latents and interpolation t-values. + lat_t01 = tf.random_normal([self.minibatch_per_gpu * 2] + Gs_clone.input_shape[1:]) + lerp_t = tf.random_uniform([self.minibatch_per_gpu], 0.0, 1.0 if self.sampling == 'full' else 0.0) + + # Interpolate in W or Z. + if self.space == 'w': + dlat_t01 = Gs_clone.components.mapping.get_output_for(lat_t01, None, is_validation=True) + dlat_t0, dlat_t1 = dlat_t01[0::2], dlat_t01[1::2] + dlat_e0 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis]) + dlat_e1 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis] + self.epsilon) + dlat_e01 = tf.reshape(tf.stack([dlat_e0, dlat_e1], axis=1), dlat_t01.shape) + else: # space == 'z' + lat_t0, lat_t1 = lat_t01[0::2], lat_t01[1::2] + lat_e0 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis]) + lat_e1 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis] + self.epsilon) + lat_e01 = tf.reshape(tf.stack([lat_e0, lat_e1], axis=1), lat_t01.shape) + dlat_e01 = Gs_clone.components.mapping.get_output_for(lat_e01, None, is_validation=True) + + # Synthesize images. + with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch + images = Gs_clone.components.synthesis.get_output_for(dlat_e01, is_validation=True, randomize_noise=False) + + # Crop only the face region. + c = int(images.shape[2] // 8) + images = images[:, :, c*3 : c*7, c*2 : c*6] + + # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. + if images.shape[2] > 256: + factor = images.shape[2] // 256 + images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) + images = tf.reduce_mean(images, axis=[3,5]) + + # Scale dynamic range from [-1,1] to [0,255] for VGG. + images = (images + 1) * (255 / 2) + + # Evaluate perceptual distance. 
+ img_e0, img_e1 = images[0::2], images[1::2] + distance_measure = misc.load_pkl('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2') # vgg16_zhang_perceptual.pkl + distance_expr.append(distance_measure.get_output_for(img_e0, img_e1) * (1 / self.epsilon**2)) + + # Sampling loop. + all_distances = [] + for _ in range(0, self.num_samples, minibatch_size): + all_distances += tflib.run(distance_expr) + all_distances = np.concatenate(all_distances, axis=0) + + # Reject outliers. + lo = np.percentile(all_distances, 1, interpolation='lower') + hi = np.percentile(all_distances, 99, interpolation='higher') + filtered_distances = np.extract(np.logical_and(lo <= all_distances, all_distances <= hi), all_distances) + self._report_result(np.mean(filtered_distances)) + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/pretrained_example.py b/models/stylegan_tf_official/pretrained_example.py new file mode 100644 index 0000000000000000000000000000000000000000..63baef08bfa4bf34f52a0cf63e10a0b6783ac316 --- /dev/null +++ b/models/stylegan_tf_official/pretrained_example.py @@ -0,0 +1,47 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Minimal script for generating an image using pre-trained StyleGAN generator.""" + +import os +import pickle +import numpy as np +import PIL.Image +import dnnlib +import dnnlib.tflib as tflib +import config + +def main(): + # Initialize TensorFlow. + tflib.init_tf() + + # Load pre-trained network. + url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl + with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: + _G, _D, Gs = pickle.load(f) + # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run. + # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run. + # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot. + + # Print network details. + Gs.print_layers() + + # Pick latent vector. + rnd = np.random.RandomState(5) + latents = rnd.randn(1, Gs.input_shape[1]) + + # Generate image. + fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) + images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt) + + # Save image. + os.makedirs(config.result_dir, exist_ok=True) + png_filename = os.path.join(config.result_dir, 'example.png') + PIL.Image.fromarray(images[0], 'RGB').save(png_filename) + +if __name__ == "__main__": + main() diff --git a/models/stylegan_tf_official/run_metrics.py b/models/stylegan_tf_official/run_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..5d1597bbd4e16a2535309ea74c3559cae2a5fa53 --- /dev/null +++ b/models/stylegan_tf_official/run_metrics.py @@ -0,0 +1,105 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. 
To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Main entry point for training StyleGAN and ProGAN networks.""" + +import dnnlib +from dnnlib import EasyDict +import dnnlib.tflib as tflib + +import config +from metrics import metric_base +from training import misc + +#---------------------------------------------------------------------------- + +def run_pickle(submit_config, metric_args, network_pkl, dataset_args, mirror_augment): + ctx = dnnlib.RunContext(submit_config) + tflib.init_tf() + print('Evaluating %s metric on network_pkl "%s"...' % (metric_args.name, network_pkl)) + metric = dnnlib.util.call_func_by_name(**metric_args) + print() + metric.run(network_pkl, dataset_args=dataset_args, mirror_augment=mirror_augment, num_gpus=submit_config.num_gpus) + print() + ctx.close() + +#---------------------------------------------------------------------------- + +def run_snapshot(submit_config, metric_args, run_id, snapshot): + ctx = dnnlib.RunContext(submit_config) + tflib.init_tf() + print('Evaluating %s metric on run_id %s, snapshot %s...' % (metric_args.name, run_id, snapshot)) + run_dir = misc.locate_run_dir(run_id) + network_pkl = misc.locate_network_pkl(run_dir, snapshot) + metric = dnnlib.util.call_func_by_name(**metric_args) + print() + metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus) + print() + ctx.close() + +#---------------------------------------------------------------------------- + +def run_all_snapshots(submit_config, metric_args, run_id): + ctx = dnnlib.RunContext(submit_config) + tflib.init_tf() + print('Evaluating %s metric on all snapshots of run_id %s...' % (metric_args.name, run_id)) + run_dir = misc.locate_run_dir(run_id) + network_pkls = misc.list_network_pkls(run_dir) + metric = dnnlib.util.call_func_by_name(**metric_args) + print() + for idx, network_pkl in enumerate(network_pkls): + ctx.update('', idx, len(network_pkls)) + metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus) + print() + ctx.close() + +#---------------------------------------------------------------------------- + +def main(): + submit_config = dnnlib.SubmitConfig() + + # Which metrics to evaluate? + metrics = [] + metrics += [metric_base.fid50k] + #metrics += [metric_base.ppl_zfull] + #metrics += [metric_base.ppl_wfull] + #metrics += [metric_base.ppl_zend] + #metrics += [metric_base.ppl_wend] + #metrics += [metric_base.ls] + #metrics += [metric_base.dummy] + + # Which networks to evaluate them on? + tasks = [] + tasks += [EasyDict(run_func_name='run_metrics.run_pickle', network_pkl='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', dataset_args=EasyDict(tfrecord_dir='ffhq', shuffle_mb=0), mirror_augment=True)] # karras2019stylegan-ffhq-1024x1024.pkl + #tasks += [EasyDict(run_func_name='run_metrics.run_snapshot', run_id=100, snapshot=25000)] + #tasks += [EasyDict(run_func_name='run_metrics.run_all_snapshots', run_id=100)] + + # How many GPUs to use? + submit_config.num_gpus = 1 + #submit_config.num_gpus = 2 + #submit_config.num_gpus = 4 + #submit_config.num_gpus = 8 + + # Execute. 
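+    # One run is submitted per (task, metric) pair; run_desc encodes the task type,
+    # the metric name, any run_id/snapshot arguments, and the GPU count.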
+ submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir) + submit_config.run_dir_ignore += config.run_dir_ignore + for task in tasks: + for metric in metrics: + submit_config.run_desc = '%s-%s' % (task.run_func_name, metric.name) + if task.run_func_name.endswith('run_snapshot'): + submit_config.run_desc += '-%s-%s' % (task.run_id, task.snapshot) + if task.run_func_name.endswith('run_all_snapshots'): + submit_config.run_desc += '-%s' % task.run_id + submit_config.run_desc += '-%dgpu' % submit_config.num_gpus + dnnlib.submit_run(submit_config, metric_args=metric, **task) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + main() + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/train.py b/models/stylegan_tf_official/train.py new file mode 100644 index 0000000000000000000000000000000000000000..29df3c226b87816ceec25752293df08a70d63189 --- /dev/null +++ b/models/stylegan_tf_official/train.py @@ -0,0 +1,192 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Main entry point for training StyleGAN and ProGAN networks.""" + +import copy +import dnnlib +from dnnlib import EasyDict + +import config +from metrics import metric_base + +#---------------------------------------------------------------------------- +# Official training configs for StyleGAN, targeted mainly for FFHQ. + +if 1: + desc = 'sgan' # Description string included in result subdir name. + train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop. + G = EasyDict(func_name='training.networks_stylegan.G_style') # Options for generator network. + D = EasyDict(func_name='training.networks_stylegan.D_basic') # Options for discriminator network. + G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer. + D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer. + G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating') # Options for generator loss. + D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp', r1_gamma=10.0) # Options for discriminator loss. + dataset = EasyDict() # Options for load_dataset(). + sched = EasyDict() # Options for TrainingSchedule. + grid = EasyDict(size='4k', layout='random') # Options for setup_snapshot_image_grid(). + metrics = [metric_base.fid50k] # Options for MetricGroup. + submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run(). + tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf(). + + # Dataset. 
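+    # Choose exactly one dataset line below; it selects the TFRecord directory and
+    # whether horizontal mirror augmentation is applied during training.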
+ desc += '-ffhq'; dataset = EasyDict(tfrecord_dir='ffhq'); train.mirror_augment = True + #desc += '-ffhq512'; dataset = EasyDict(tfrecord_dir='ffhq', resolution=512); train.mirror_augment = True + #desc += '-ffhq256'; dataset = EasyDict(tfrecord_dir='ffhq', resolution=256); train.mirror_augment = True + #desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True + #desc += '-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-full'); train.mirror_augment = False + #desc += '-car'; dataset = EasyDict(tfrecord_dir='lsun-car-512x384'); train.mirror_augment = False + #desc += '-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-full'); train.mirror_augment = False + + # Number of GPUs. + #desc += '-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4} + #desc += '-2gpu'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8} + #desc += '-4gpu'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16} + desc += '-8gpu'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32} + + # Default options. + train.total_kimg = 25000 + sched.lod_initial_resolution = 8 + sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003} + sched.D_lrate_dict = EasyDict(sched.G_lrate_dict) + + # WGAN-GP loss for CelebA-HQ. + #desc += '-wgangp'; G_loss = EasyDict(func_name='training.loss.G_wgan'); D_loss = EasyDict(func_name='training.loss.D_wgan_gp'); sched.G_lrate_dict = {k: min(v, 0.002) for k, v in sched.G_lrate_dict.items()}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict) + + # Table 1. + #desc += '-tuned-baseline'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False + #desc += '-add-mapping-and-styles'; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False + #desc += '-remove-traditional-input'; G.style_mixing_prob = 0.0; G.use_noise = False + #desc += '-add-noise-inputs'; G.style_mixing_prob = 0.0 + #desc += '-mixing-regularization' # default + + # Table 2. + #desc += '-mix0'; G.style_mixing_prob = 0.0 + #desc += '-mix50'; G.style_mixing_prob = 0.5 + #desc += '-mix90'; G.style_mixing_prob = 0.9 # default + #desc += '-mix100'; G.style_mixing_prob = 1.0 + + # Table 4. + #desc += '-traditional-0'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False + #desc += '-traditional-8'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 8; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False + #desc += '-stylebased-0'; G.mapping_layers = 0 + #desc += '-stylebased-1'; G.mapping_layers = 1 + #desc += '-stylebased-2'; G.mapping_layers = 2 + #desc += '-stylebased-8'; G.mapping_layers = 8 # default + +#---------------------------------------------------------------------------- +# Official training configs for Progressive GAN, targeted mainly for CelebA-HQ. + +if 0: + desc = 'pgan' # Description string included in result subdir name. 
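+    # This block mirrors the StyleGAN configuration above, but swaps in the
+    # progressive-growing networks (G_paper/D_paper) and WGAN-GP losses.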
+ train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop. + G = EasyDict(func_name='training.networks_progan.G_paper') # Options for generator network. + D = EasyDict(func_name='training.networks_progan.D_paper') # Options for discriminator network. + G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer. + D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer. + G_loss = EasyDict(func_name='training.loss.G_wgan') # Options for generator loss. + D_loss = EasyDict(func_name='training.loss.D_wgan_gp') # Options for discriminator loss. + dataset = EasyDict() # Options for load_dataset(). + sched = EasyDict() # Options for TrainingSchedule. + grid = EasyDict(size='1080p', layout='random') # Options for setup_snapshot_image_grid(). + metrics = [metric_base.fid50k] # Options for MetricGroup. + submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run(). + tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf(). + + # Dataset (choose one). + desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True + #desc += '-celeba'; dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True + #desc += '-cifar10'; dataset = EasyDict(tfrecord_dir='cifar10') + #desc += '-cifar100'; dataset = EasyDict(tfrecord_dir='cifar100') + #desc += '-svhn'; dataset = EasyDict(tfrecord_dir='svhn') + #desc += '-mnist'; dataset = EasyDict(tfrecord_dir='mnist') + #desc += '-mnistrgb'; dataset = EasyDict(tfrecord_dir='mnistrgb') + #desc += '-syn1024rgb'; dataset = EasyDict(class_name='training.dataset.SyntheticDataset', resolution=1024, num_channels=3) + #desc += '-lsun-airplane'; dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True + #desc += '-lsun-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True + #desc += '-lsun-bicycle'; dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True + #desc += '-lsun-bird'; dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True + #desc += '-lsun-boat'; dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True + #desc += '-lsun-bottle'; dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True + #desc += '-lsun-bridge'; dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True + #desc += '-lsun-bus'; dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True + #desc += '-lsun-car'; dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True + #desc += '-lsun-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True + #desc += '-lsun-chair'; dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True + #desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True + #desc += '-lsun-classroom'; dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True + #desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True + #desc += '-lsun-cow'; dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True + #desc += '-lsun-diningroom'; dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True + #desc += '-lsun-diningtable'; dataset = 
EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True + #desc += '-lsun-dog'; dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True + #desc += '-lsun-horse'; dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True + #desc += '-lsun-kitchen'; dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True + #desc += '-lsun-livingroom'; dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True + #desc += '-lsun-motorbike'; dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True + #desc += '-lsun-person'; dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True + #desc += '-lsun-pottedplant'; dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True + #desc += '-lsun-restaurant'; dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True + #desc += '-lsun-sheep'; dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True + #desc += '-lsun-sofa'; dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = True + #desc += '-lsun-tower'; dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True + #desc += '-lsun-train'; dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True + #desc += '-lsun-tvmonitor'; dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True + + # Conditioning & snapshot options. + #desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label + #desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label + #desc += '-g4k'; grid.size = '4k' + #desc += '-grpc'; grid.layout = 'row_per_class' + + # Config presets (choose one). + #desc += '-preset-v1-1gpu'; submit_config.num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000 + desc += '-preset-v2-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 + #desc += '-preset-v2-2gpus'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 + #desc += '-preset-v2-4gpus'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 + #desc += '-preset-v2-8gpus'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 + + # Numerical precision (choose one). 
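+    # The fp16 option runs G and D in float16 and enables loss scaling in both
+    # optimizers, allowing larger per-GPU minibatches at 512/1024 resolution.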
+ desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4} + #desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8} + + # Disable individual features. + #desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000 + #desc += '-nopixelnorm'; G.use_pixelnorm = False + #desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False + #desc += '-noleakyrelu'; G.use_leakyrelu = False + #desc += '-nosmoothing'; train.G_smoothing_kimg = 0.0 + #desc += '-norepeat'; train.minibatch_repeats = 1 + #desc += '-noreset'; train.reset_opt_for_new_lod = False + + # Special modes. + #desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000 + #desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000 + #desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100 + #desc += '-GRAPH'; train.save_tf_graph = True + #desc += '-HIST'; train.save_weight_histograms = True + +#---------------------------------------------------------------------------- +# Main entry point for training. +# Calls the function indicated by 'train' using the selected options. + +def main(): + kwargs = EasyDict(train) + kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss) + kwargs.update(dataset_args=dataset, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config) + kwargs.submit_config = copy.deepcopy(submit_config) + kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir) + kwargs.submit_config.run_dir_ignore += config.run_dir_ignore + kwargs.submit_config.run_desc = desc + dnnlib.submit_run(**kwargs) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + main() + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/training/__init__.py b/models/stylegan_tf_official/training/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db8124b132f91216c0ded226f20ea3a046734728 --- /dev/null +++ b/models/stylegan_tf_official/training/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +# empty diff --git a/models/stylegan_tf_official/training/dataset.py b/models/stylegan_tf_official/training/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..cf142226b1794b675d61151467444cb65bdaa1a0 --- /dev/null +++ b/models/stylegan_tf_official/training/dataset.py @@ -0,0 +1,241 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Multi-resolution input data pipeline.""" + +import os +import glob +import numpy as np +import tensorflow as tf +import dnnlib +import dnnlib.tflib as tflib + +#---------------------------------------------------------------------------- +# Parse individual image from a tfrecords file. + +def parse_tfrecord_tf(record): + features = tf.parse_single_example(record, features={ + 'shape': tf.FixedLenFeature([3], tf.int64), + 'data': tf.FixedLenFeature([], tf.string)}) + data = tf.decode_raw(features['data'], tf.uint8) + return tf.reshape(data, features['shape']) + +def parse_tfrecord_np(record): + ex = tf.train.Example() + ex.ParseFromString(record) + shape = ex.features.feature['shape'].int64_list.value # temporary pylint workaround # pylint: disable=no-member + data = ex.features.feature['data'].bytes_list.value[0] # temporary pylint workaround # pylint: disable=no-member + return np.fromstring(data, np.uint8).reshape(shape) + +#---------------------------------------------------------------------------- +# Dataset class that loads data from tfrecords files. + +class TFRecordDataset: + def __init__(self, + tfrecord_dir, # Directory containing a collection of tfrecords files. + resolution = None, # Dataset resolution, None = autodetect. + label_file = None, # Relative path of the labels file, None = autodetect. + max_label_size = 0, # 0 = no labels, 'full' = full labels, = N first label components. + repeat = True, # Repeat dataset indefinitely. + shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling. + prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching. + buffer_mb = 256, # Read buffer size (megabytes). + num_threads = 2): # Number of concurrent threads. + + self.tfrecord_dir = tfrecord_dir + self.resolution = None + self.resolution_log2 = None + self.shape = [] # [channel, height, width] + self.dtype = 'uint8' + self.dynamic_range = [0, 255] + self.label_file = label_file + self.label_size = None # [component] + self.label_dtype = None + self._np_labels = None + self._tf_minibatch_in = None + self._tf_labels_var = None + self._tf_labels_dataset = None + self._tf_datasets = dict() + self._tf_iterator = None + self._tf_init_ops = dict() + self._tf_minibatch_np = None + self._cur_minibatch = -1 + self._cur_lod = -1 + + # List tfrecords files and inspect their shapes. + assert os.path.isdir(self.tfrecord_dir) + tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords'))) + assert len(tfr_files) >= 1 + tfr_shapes = [] + for tfr_file in tfr_files: + tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE) + for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt): + tfr_shapes.append(parse_tfrecord_np(record).shape) + break + + # Autodetect label filename. + if self.label_file is None: + guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels'))) + if len(guess): + self.label_file = guess[0] + elif not os.path.isfile(self.label_file): + guess = os.path.join(self.tfrecord_dir, self.label_file) + if os.path.isfile(guess): + self.label_file = guess + + # Determine shape and resolution. 
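+        # The highest-resolution tfrecord defines the dataset shape; the remaining
+        # tfrecords must contain the same images downscaled by successive powers of
+        # two, one file per level of detail (lod).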
+ max_shape = max(tfr_shapes, key=np.prod) + self.resolution = resolution if resolution is not None else max_shape[1] + self.resolution_log2 = int(np.log2(self.resolution)) + self.shape = [max_shape[0], self.resolution, self.resolution] + tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes] + assert all(shape[0] == max_shape[0] for shape in tfr_shapes) + assert all(shape[1] == shape[2] for shape in tfr_shapes) + assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods)) + assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1)) + + # Load labels. + assert max_label_size == 'full' or max_label_size >= 0 + self._np_labels = np.zeros([1<<20, 0], dtype=np.float32) + if self.label_file is not None and max_label_size != 0: + self._np_labels = np.load(self.label_file) + assert self._np_labels.ndim == 2 + if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size: + self._np_labels = self._np_labels[:, :max_label_size] + self.label_size = self._np_labels.shape[1] + self.label_dtype = self._np_labels.dtype.name + + # Build TF expressions. + with tf.name_scope('Dataset'), tf.device('/cpu:0'): + self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[]) + self._tf_labels_var = tflib.create_var_with_large_initial_value(self._np_labels, name='labels_var') + self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var) + for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods): + if tfr_lod < 0: + continue + dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20) + dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads) + dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset)) + bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize + if shuffle_mb > 0: + dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1) + if repeat: + dset = dset.repeat() + if prefetch_mb > 0: + dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1) + dset = dset.batch(self._tf_minibatch_in) + self._tf_datasets[tfr_lod] = dset + self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes) + self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()} + + # Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf(). + def configure(self, minibatch_size, lod=0): + lod = int(np.floor(lod)) + assert minibatch_size >= 1 and lod in self._tf_datasets + if self._cur_minibatch != minibatch_size or self._cur_lod != lod: + self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size}) + self._cur_minibatch = minibatch_size + self._cur_lod = lod + + # Get next minibatch as TensorFlow expressions. + def get_minibatch_tf(self): # => images, labels + return self._tf_iterator.get_next() + + # Get next minibatch as NumPy arrays. + def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels + self.configure(minibatch_size, lod) + if self._tf_minibatch_np is None: + self._tf_minibatch_np = self.get_minibatch_tf() + return tflib.run(self._tf_minibatch_np) + + # Get random labels as TensorFlow expression. 
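+    # Labels are sampled uniformly at random from the preloaded label table; for an
+    # unlabeled dataset a zero-width tensor of the correct dtype is returned instead.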
+ def get_random_labels_tf(self, minibatch_size): # => labels + if self.label_size > 0: + with tf.device('/cpu:0'): + return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32)) + return tf.zeros([minibatch_size, 0], self.label_dtype) + + # Get random labels as NumPy array. + def get_random_labels_np(self, minibatch_size): # => labels + if self.label_size > 0: + return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])] + return np.zeros([minibatch_size, 0], self.label_dtype) + +#---------------------------------------------------------------------------- +# Base class for datasets that are generated on the fly. + +class SyntheticDataset: + def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'): + self.resolution = resolution + self.resolution_log2 = int(np.log2(resolution)) + self.shape = [num_channels, resolution, resolution] + self.dtype = dtype + self.dynamic_range = dynamic_range + self.label_size = label_size + self.label_dtype = label_dtype + self._tf_minibatch_var = None + self._tf_lod_var = None + self._tf_minibatch_np = None + self._tf_labels_np = None + + assert self.resolution == 2 ** self.resolution_log2 + with tf.name_scope('Dataset'): + self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var') + self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var') + + def configure(self, minibatch_size, lod=0): + lod = int(np.floor(lod)) + assert minibatch_size >= 1 and 0 <= lod <= self.resolution_log2 + tflib.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod}) + + def get_minibatch_tf(self): # => images, labels + with tf.name_scope('SyntheticDataset'): + shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32) + shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink] + images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape) + labels = self._generate_labels(self._tf_minibatch_var) + return images, labels + + def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels + self.configure(minibatch_size, lod) + if self._tf_minibatch_np is None: + self._tf_minibatch_np = self.get_minibatch_tf() + return tflib.run(self._tf_minibatch_np) + + def get_random_labels_tf(self, minibatch_size): # => labels + with tf.name_scope('SyntheticDataset'): + return self._generate_labels(minibatch_size) + + def get_random_labels_np(self, minibatch_size): # => labels + self.configure(minibatch_size) + if self._tf_labels_np is None: + self._tf_labels_np = self.get_random_labels_tf(minibatch_size) + return tflib.run(self._tf_labels_np) + + def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses # pylint: disable=unused-argument + return tf.zeros([minibatch] + shape, self.dtype) + + def _generate_labels(self, minibatch): # to be overridden by subclasses + return tf.zeros([minibatch, self.label_size], self.label_dtype) + +#---------------------------------------------------------------------------- +# Helper func for constructing a dataset object using the given options. + +def load_dataset(class_name='training.dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs): + adjusted_kwargs = dict(kwargs) + if 'tfrecord_dir' in adjusted_kwargs and data_dir is not None: + adjusted_kwargs['tfrecord_dir'] = os.path.join(data_dir, adjusted_kwargs['tfrecord_dir']) + if verbose: + print('Streaming data using %s...' 
% class_name) + dataset = dnnlib.util.get_obj_by_name(class_name)(**adjusted_kwargs) + if verbose: + print('Dataset shape =', np.int32(dataset.shape).tolist()) + print('Dynamic range =', dataset.dynamic_range) + print('Label size =', dataset.label_size) + return dataset + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/training/loss.py b/models/stylegan_tf_official/training/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..aa59b61bf316f73f269849b54ec3bb35b6a0d61d --- /dev/null +++ b/models/stylegan_tf_official/training/loss.py @@ -0,0 +1,177 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Loss functions.""" + +import tensorflow as tf +import dnnlib.tflib as tflib +from dnnlib.tflib.autosummary import autosummary + +#---------------------------------------------------------------------------- +# Convenience func that casts all of its arguments to tf.float32. + +def fp32(*values): + if len(values) == 1 and isinstance(values[0], tuple): + values = values[0] + values = tuple(tf.cast(v, tf.float32) for v in values) + return values if len(values) >= 2 else values[0] + +#---------------------------------------------------------------------------- +# WGAN & WGAN-GP loss functions. + +def G_wgan(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + labels = training_set.get_random_labels_tf(minibatch_size) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + loss = -fake_scores_out + return loss + +def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument + wgan_epsilon = 0.001): # Weight for the epsilon term, \epsilon_{drift}. + + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + real_scores_out = autosummary('Loss/scores/real', real_scores_out) + fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) + loss = fake_scores_out - real_scores_out + + with tf.name_scope('EpsilonPenalty'): + epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out)) + loss += epsilon_penalty * wgan_epsilon + return loss + +def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument + wgan_lambda = 10.0, # Weight for the gradient penalty term. + wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}. + wgan_target = 1.0): # Target value for gradient magnitudes. 
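+    # WGAN-GP: the critic loss is (fake score - real score), plus a gradient penalty
+    # that drives the gradient norm at random real/fake interpolates toward
+    # wgan_target, plus a small epsilon term that keeps real scores from drifting.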
+ + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + real_scores_out = autosummary('Loss/scores/real', real_scores_out) + fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) + loss = fake_scores_out - real_scores_out + + with tf.name_scope('GradientPenalty'): + mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype) + mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors) + mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True)) + mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out) + mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out)) + mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0])) + mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3])) + mixed_norms = autosummary('Loss/mixed_norms', mixed_norms) + gradient_penalty = tf.square(mixed_norms - wgan_target) + loss += gradient_penalty * (wgan_lambda / (wgan_target**2)) + + with tf.name_scope('EpsilonPenalty'): + epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out)) + loss += epsilon_penalty * wgan_epsilon + return loss + +#---------------------------------------------------------------------------- +# Hinge loss functions. (Use G_wgan with these) + +def D_hinge(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + real_scores_out = autosummary('Loss/scores/real', real_scores_out) + fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) + loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out) + return loss + +def D_hinge_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument + wgan_lambda = 10.0, # Weight for the gradient penalty term. + wgan_target = 1.0): # Target value for gradient magnitudes. 
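+    # Hinge loss combined with the same interpolation-based gradient penalty as
+    # D_wgan_gp above, but without the epsilon drift term.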
+ + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + real_scores_out = autosummary('Loss/scores/real', real_scores_out) + fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) + loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out) + + with tf.name_scope('GradientPenalty'): + mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype) + mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors) + mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True)) + mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out) + mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out)) + mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0])) + mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3])) + mixed_norms = autosummary('Loss/mixed_norms', mixed_norms) + gradient_penalty = tf.square(mixed_norms - wgan_target) + loss += gradient_penalty * (wgan_lambda / (wgan_target**2)) + return loss + + +#---------------------------------------------------------------------------- +# Loss functions advocated by the paper +# "Which Training Methods for GANs do actually Converge?" + +def G_logistic_saturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + labels = training_set.get_random_labels_tf(minibatch_size) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + loss = -tf.nn.softplus(fake_scores_out) # log(1 - logistic(fake_scores_out)) + return loss + +def G_logistic_nonsaturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + labels = training_set.get_random_labels_tf(minibatch_size) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + loss = tf.nn.softplus(-fake_scores_out) # -log(logistic(fake_scores_out)) + return loss + +def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + fake_images_out = G.get_output_for(latents, labels, is_training=True) + real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + real_scores_out = autosummary('Loss/scores/real', real_scores_out) + fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) + loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out)) + loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type + return loss + +def D_logistic_simplegp(G, D, opt, training_set, minibatch_size, reals, labels, r1_gamma=10.0, r2_gamma=0.0): # pylint: disable=unused-argument + latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) + 
fake_images_out = G.get_output_for(latents, labels, is_training=True) + real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) + fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) + real_scores_out = autosummary('Loss/scores/real', real_scores_out) + fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) + loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out)) + loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type + + if r1_gamma != 0.0: + with tf.name_scope('R1Penalty'): + real_loss = opt.apply_loss_scaling(tf.reduce_sum(real_scores_out)) + real_grads = opt.undo_loss_scaling(fp32(tf.gradients(real_loss, [reals])[0])) + r1_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1,2,3]) + r1_penalty = autosummary('Loss/r1_penalty', r1_penalty) + loss += r1_penalty * (r1_gamma * 0.5) + + if r2_gamma != 0.0: + with tf.name_scope('R2Penalty'): + fake_loss = opt.apply_loss_scaling(tf.reduce_sum(fake_scores_out)) + fake_grads = opt.undo_loss_scaling(fp32(tf.gradients(fake_loss, [fake_images_out])[0])) + r2_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1,2,3]) + r2_penalty = autosummary('Loss/r2_penalty', r2_penalty) + loss += r2_penalty * (r2_gamma * 0.5) + return loss + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/training/misc.py b/models/stylegan_tf_official/training/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..50ae51c722cb1e553c56051cbd4556110fe4a1f9 --- /dev/null +++ b/models/stylegan_tf_official/training/misc.py @@ -0,0 +1,245 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Miscellaneous utility functions.""" + +import os +import glob +import pickle +import re +import numpy as np +from collections import defaultdict +import PIL.Image +import dnnlib + +import config +from training import dataset + +#---------------------------------------------------------------------------- +# Convenience wrappers for pickle that are able to load data produced by +# older versions of the code, and from external URLs. + +def open_file_or_url(file_or_url): + if dnnlib.util.is_url(file_or_url): + return dnnlib.util.open_url(file_or_url, cache_dir=config.cache_dir) + return open(file_or_url, 'rb') + +def load_pkl(file_or_url): + with open_file_or_url(file_or_url) as file: + return pickle.load(file, encoding='latin1') + +def save_pkl(obj, filename): + with open(filename, 'wb') as file: + pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL) + +#---------------------------------------------------------------------------- +# Image utils. 
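+# adjust_dynamic_range() linearly remaps data from drange_in to drange_out; the image
+# helpers below use it to map arbitrary input ranges to uint8 [0, 255] before saving.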
+ +def adjust_dynamic_range(data, drange_in, drange_out): + if drange_in != drange_out: + scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0])) + bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale) + data = data * scale + bias + return data + +def create_image_grid(images, grid_size=None): + assert images.ndim == 3 or images.ndim == 4 + num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2] + + if grid_size is not None: + grid_w, grid_h = tuple(grid_size) + else: + grid_w = max(int(np.ceil(np.sqrt(num))), 1) + grid_h = max((num - 1) // grid_w + 1, 1) + + grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype) + for idx in range(num): + x = (idx % grid_w) * img_w + y = (idx // grid_w) * img_h + grid[..., y : y + img_h, x : x + img_w] = images[idx] + return grid + +def convert_to_pil_image(image, drange=[0,1]): + assert image.ndim == 2 or image.ndim == 3 + if image.ndim == 3: + if image.shape[0] == 1: + image = image[0] # grayscale CHW => HW + else: + image = image.transpose(1, 2, 0) # CHW -> HWC + + image = adjust_dynamic_range(image, drange, [0,255]) + image = np.rint(image).clip(0, 255).astype(np.uint8) + fmt = 'RGB' if image.ndim == 3 else 'L' + return PIL.Image.fromarray(image, fmt) + +def save_image(image, filename, drange=[0,1], quality=95): + img = convert_to_pil_image(image, drange) + if '.jpg' in filename: + img.save(filename,"JPEG", quality=quality, optimize=True) + else: + img.save(filename) + +def save_image_grid(images, filename, drange=[0,1], grid_size=None): + convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename) + +#---------------------------------------------------------------------------- +# Locating results. 
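+# A training run can be referenced either by its result directory or by its numeric
+# run id; the helpers below resolve both forms to a run directory and network pickle.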
+ +def locate_run_dir(run_id_or_run_dir): + if isinstance(run_id_or_run_dir, str): + if os.path.isdir(run_id_or_run_dir): + return run_id_or_run_dir + converted = dnnlib.submission.submit.convert_path(run_id_or_run_dir) + if os.path.isdir(converted): + return converted + + run_dir_pattern = re.compile('^0*%s-' % str(run_id_or_run_dir)) + for search_dir in ['']: + full_search_dir = config.result_dir if search_dir == '' else os.path.normpath(os.path.join(config.result_dir, search_dir)) + run_dir = os.path.join(full_search_dir, str(run_id_or_run_dir)) + if os.path.isdir(run_dir): + return run_dir + run_dirs = sorted(glob.glob(os.path.join(full_search_dir, '*'))) + run_dirs = [run_dir for run_dir in run_dirs if run_dir_pattern.match(os.path.basename(run_dir))] + run_dirs = [run_dir for run_dir in run_dirs if os.path.isdir(run_dir)] + if len(run_dirs) == 1: + return run_dirs[0] + raise IOError('Cannot locate result subdir for run', run_id_or_run_dir) + +def list_network_pkls(run_id_or_run_dir, include_final=True): + run_dir = locate_run_dir(run_id_or_run_dir) + pkls = sorted(glob.glob(os.path.join(run_dir, 'network-*.pkl'))) + if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl': + if include_final: + pkls.append(pkls[0]) + del pkls[0] + return pkls + +def locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None): + for candidate in [snapshot_or_network_pkl, run_id_or_run_dir_or_network_pkl]: + if isinstance(candidate, str): + if os.path.isfile(candidate): + return candidate + converted = dnnlib.submission.submit.convert_path(candidate) + if os.path.isfile(converted): + return converted + + pkls = list_network_pkls(run_id_or_run_dir_or_network_pkl) + if len(pkls) >= 1 and snapshot_or_network_pkl is None: + return pkls[-1] + + for pkl in pkls: + try: + name = os.path.splitext(os.path.basename(pkl))[0] + number = int(name.split('-')[-1]) + if number == snapshot_or_network_pkl: + return pkl + except ValueError: pass + except IndexError: pass + raise IOError('Cannot locate network pkl for snapshot', snapshot_or_network_pkl) + +def get_id_string_for_network_pkl(network_pkl): + p = network_pkl.replace('.pkl', '').replace('\\', '/').split('/') + return '-'.join(p[max(len(p) - 2, 0):]) + +#---------------------------------------------------------------------------- +# Loading data from previous training runs. + +def load_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None): + return load_pkl(locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl)) + +def parse_config_for_previous_run(run_id): + run_dir = locate_run_dir(run_id) + + # Parse config.txt. + cfg = defaultdict(dict) + with open(os.path.join(run_dir, 'config.txt'), 'rt') as f: + for line in f: + line = re.sub(r"^{?\s*'(\w+)':\s*{(.*)(},|}})$", r"\1 = {\2}", line.strip()) + if line.startswith('dataset =') or line.startswith('train ='): + exec(line, cfg, cfg) # pylint: disable=exec-used + + # Handle legacy options. 
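+    # Older runs stored some options under different names (file_pattern,
+    # mirror_augment, max_labels, max_images); rewrite them into the current schema.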
+ if 'file_pattern' in cfg['dataset']: + cfg['dataset']['tfrecord_dir'] = cfg['dataset'].pop('file_pattern').replace('-r??.tfrecords', '') + if 'mirror_augment' in cfg['dataset']: + cfg['train']['mirror_augment'] = cfg['dataset'].pop('mirror_augment') + if 'max_labels' in cfg['dataset']: + v = cfg['dataset'].pop('max_labels') + if v is None: v = 0 + if v == 'all': v = 'full' + cfg['dataset']['max_label_size'] = v + if 'max_images' in cfg['dataset']: + cfg['dataset'].pop('max_images') + return cfg + +def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment + cfg = parse_config_for_previous_run(run_id) + cfg['dataset'].update(kwargs) + dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **cfg['dataset']) + mirror_augment = cfg['train'].get('mirror_augment', False) + return dataset_obj, mirror_augment + +def apply_mirror_augment(minibatch): + mask = np.random.rand(minibatch.shape[0]) < 0.5 + minibatch = np.array(minibatch) + minibatch[mask] = minibatch[mask, :, :, ::-1] + return minibatch + +#---------------------------------------------------------------------------- +# Size and contents of the image snapshot grids that are exported +# periodically during training. + +def setup_snapshot_image_grid(G, training_set, + size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display. + layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label. + + # Select size. + gw = 1; gh = 1 + if size == '1080p': + gw = np.clip(1920 // G.output_shape[3], 3, 32) + gh = np.clip(1080 // G.output_shape[2], 2, 32) + if size == '4k': + gw = np.clip(3840 // G.output_shape[3], 7, 32) + gh = np.clip(2160 // G.output_shape[2], 4, 32) + + # Initialize data arrays. + reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype) + labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype) + latents = np.random.randn(gw * gh, *G.input_shape[1:]) + + # Random layout. + if layout == 'random': + reals[:], labels[:] = training_set.get_minibatch_np(gw * gh) + + # Class-conditional layouts. + class_layouts = dict(row_per_class=[gw,1], col_per_class=[1,gh], class4x4=[4,4]) + if layout in class_layouts: + bw, bh = class_layouts[layout] + nw = (gw - 1) // bw + 1 + nh = (gh - 1) // bh + 1 + blocks = [[] for _i in range(nw * nh)] + for _iter in range(1000000): + real, label = training_set.get_minibatch_np(1) + idx = np.argmax(label[0]) + while idx < len(blocks) and len(blocks[idx]) >= bw * bh: + idx += training_set.label_size + if idx < len(blocks): + blocks[idx].append((real, label)) + if all(len(block) >= bw * bh for block in blocks): + break + for i, block in enumerate(blocks): + for j, (real, label) in enumerate(block): + x = (i % nw) * bw + j % bw + y = (i // nw) * bh + j // bw + if x < gw and y < gh: + reals[x + y * gw] = real[0] + labels[x + y * gw] = label[0] + + return (gw, gh), reals, labels, latents + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/training/networks_progan.py b/models/stylegan_tf_official/training/networks_progan.py new file mode 100644 index 0000000000000000000000000000000000000000..896f500b0bfca5c292b1cba8de79e270f6a08036 --- /dev/null +++ b/models/stylegan_tf_official/training/networks_progan.py @@ -0,0 +1,322 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
+# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Network architectures used in the ProGAN paper.""" + +import numpy as np +import tensorflow as tf + +# NOTE: Do not import any application-specific modules here! +# Specify all network parameters as kwargs. + +#---------------------------------------------------------------------------- + +def lerp(a, b, t): return a + (b - a) * t +def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) +def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda) + +#---------------------------------------------------------------------------- +# Get/create weight tensor for a convolutional or fully-connected layer. + +def get_weight(shape, gain=np.sqrt(2), use_wscale=False): + fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out] + std = gain / np.sqrt(fan_in) # He init + if use_wscale: + wscale = tf.constant(np.float32(std), name='wscale') + w = tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale + else: + w = tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std)) + return w + +#---------------------------------------------------------------------------- +# Fully-connected layer. + +def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False): + if len(x.shape) > 2: + x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) + w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) + w = tf.cast(w, x.dtype) + return tf.matmul(x, w) + +#---------------------------------------------------------------------------- +# Convolutional layer. + +def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): + assert kernel >= 1 and kernel % 2 == 1 + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) + w = tf.cast(w, x.dtype) + return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Apply bias to the given activation tensor. + +def apply_bias(x): + b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) + b = tf.cast(b, x.dtype) + if len(x.shape) == 2: + return x + b + return x + tf.reshape(b, [1, -1, 1, 1]) + +#---------------------------------------------------------------------------- +# Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16. + +def leaky_relu(x, alpha=0.2): + with tf.name_scope('LeakyRelu'): + alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') + return tf.maximum(x * alpha, x) + +#---------------------------------------------------------------------------- +# Nearest-neighbor upscaling layer. + +def upscale2d(x, factor=2): + assert isinstance(factor, int) and factor >= 1 + if factor == 1: return x + with tf.variable_scope('Upscale2D'): + s = x.shape + x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) + x = tf.tile(x, [1, 1, 1, factor, 1, factor]) + x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) + return x + +#---------------------------------------------------------------------------- +# Fused upscale2d + conv2d. +# Faster and uses less memory than performing the operations separately. 
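
The fused variant defined below relies on a small kernel identity: zero-padding the 3x3 weight and summing its four one-pixel shifts produces a 4x4 kernel whose stride-2 conv2d_transpose reproduces nearest-neighbour upscale2d() followed by the 3x3 convolution. A NumPy-only sketch of that kernel transformation (the feature-map counts are arbitrary, chosen just for illustration):

    import numpy as np

    w = np.random.randn(3, 3, 16, 8).astype(np.float32)    # [kernel, kernel, fmaps_out, fmaps_in]
    w = np.pad(w, [[1, 1], [1, 1], [0, 0], [0, 0]], mode='constant')
    w = w[1:, 1:] + w[:-1, 1:] + w[1:, :-1] + w[:-1, :-1]  # 4x4 fused kernel, same arithmetic as below
    print(w.shape)                                          # (4, 4, 16, 8)
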
+ +def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): + assert kernel >= 1 and kernel % 2 == 1 + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) + w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in] + w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') + w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) + w = tf.cast(w, x.dtype) + os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] + return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Box filter downscaling layer. + +def downscale2d(x, factor=2): + assert isinstance(factor, int) and factor >= 1 + if factor == 1: return x + with tf.variable_scope('Downscale2D'): + ksize = [1, 1, factor, factor] + return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') # NOTE: requires tf_config['graph_options.place_pruned_graph'] = True + +#---------------------------------------------------------------------------- +# Fused conv2d + downscale2d. +# Faster and uses less memory than performing the operations separately. + +def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): + assert kernel >= 1 and kernel % 2 == 1 + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) + w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') + w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 + w = tf.cast(w, x.dtype) + return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Pixelwise feature vector normalization. + +def pixel_norm(x, epsilon=1e-8): + with tf.variable_scope('PixelNorm'): + return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) + +#---------------------------------------------------------------------------- +# Minibatch standard deviation. + +def minibatch_stddev_layer(x, group_size=4, num_new_features=1): + with tf.variable_scope('MinibatchStddev'): + group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. + s = x.shape # [NCHW] Input shape. + y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c. + y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32. + y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group. + y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group. + y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group. + y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels. + y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups + y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type. + y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels. + return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. + +#---------------------------------------------------------------------------- +# Networks used in the ProgressiveGAN paper. + +def G_paper( + latents_in, # First input: Latent vectors [minibatch, latent_size]. + labels_in, # Second input: Labels [minibatch, label_size]. 
+ num_channels = 1, # Number of output color channels. Overridden based on dataset. + resolution = 32, # Output resolution. Overridden based on dataset. + label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. + fmap_base = 8192, # Overall multiplier for the number of feature maps. + fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. + fmap_max = 512, # Maximum number of feature maps in any layer. + latent_size = None, # Dimensionality of the latent vectors. None = min(fmap_base, fmap_max). + normalize_latents = True, # Normalize latent vectors before feeding them to the network? + use_wscale = True, # Enable equalized learning rate? + use_pixelnorm = True, # Enable pixelwise feature vector normalization? + pixelnorm_epsilon = 1e-8, # Constant epsilon for pixelwise feature vector normalization. + use_leakyrelu = True, # True = leaky ReLU, False = ReLU. + dtype = 'float32', # Data type to use for activations and outputs. + fused_scale = True, # True = use fused upscale2d + conv2d, False = separate upscale2d layers. + structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically. + is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. + **_kwargs): # Ignore unrecognized keyword args. + + resolution_log2 = int(np.log2(resolution)) + assert resolution == 2**resolution_log2 and resolution >= 4 + def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) + def PN(x): return pixel_norm(x, epsilon=pixelnorm_epsilon) if use_pixelnorm else x + if latent_size is None: latent_size = nf(0) + if structure is None: structure = 'linear' if is_template_graph else 'recursive' + act = leaky_relu if use_leakyrelu else tf.nn.relu + + latents_in.set_shape([None, latent_size]) + labels_in.set_shape([None, label_size]) + combo_in = tf.cast(tf.concat([latents_in, labels_in], axis=1), dtype) + lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) + images_out = None + + # Building blocks. + def block(x, res): # res = 2..resolution_log2 + with tf.variable_scope('%dx%d' % (2**res, 2**res)): + if res == 2: # 4x4 + if normalize_latents: x = pixel_norm(x, epsilon=pixelnorm_epsilon) + with tf.variable_scope('Dense'): + x = dense(x, fmaps=nf(res-1)*16, gain=np.sqrt(2)/4, use_wscale=use_wscale) # override gain to match the original Theano implementation + x = tf.reshape(x, [-1, nf(res-1), 4, 4]) + x = PN(act(apply_bias(x))) + with tf.variable_scope('Conv'): + x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + else: # 8x8 and up + if fused_scale: + with tf.variable_scope('Conv0_up'): + x = PN(act(apply_bias(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + else: + x = upscale2d(x) + with tf.variable_scope('Conv0'): + x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + with tf.variable_scope('Conv1'): + x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) + return x + def torgb(x, res): # res = 2..resolution_log2 + lod = resolution_log2 - res + with tf.variable_scope('ToRGB_lod%d' % lod): + return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) + + # Linear structure: simple but inefficient. 
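
Both structures that follow implement the same progressive-growing crossfade: while lod_in sits between two integer lods, the output of the newly grown block is blended with the upscaled image from the previous resolution. A scalar sketch of the fade performed by lerp_clip(img, images_out, lod_in - lod), with made-up stand-in values:

    def lerp_clip(a, b, t):
        return a + (b - a) * min(max(t, 0.0), 1.0)

    new_rgb, old_rgb_upscaled = 1.0, 0.0          # scalar stand-ins for the two image tensors
    for lod_in in (3.0, 2.5, 2.0):                # fading in the block at lod = 2
        print(lerp_clip(new_rgb, old_rgb_upscaled, lod_in - 2.0))
    # -> 0.0 (still the old resolution), 0.5 (halfway), 1.0 (new block fully active)
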
+ if structure == 'linear': + x = block(combo_in, 2) + images_out = torgb(x, 2) + for res in range(3, resolution_log2 + 1): + lod = resolution_log2 - res + x = block(x, res) + img = torgb(x, res) + images_out = upscale2d(images_out) + with tf.variable_scope('Grow_lod%d' % lod): + images_out = lerp_clip(img, images_out, lod_in - lod) + + # Recursive structure: complex but efficient. + if structure == 'recursive': + def grow(x, res, lod): + y = block(x, res) + img = lambda: upscale2d(torgb(y, res), 2**lod) + if res > 2: img = cset(img, (lod_in > lod), lambda: upscale2d(lerp(torgb(y, res), upscale2d(torgb(x, res - 1)), lod_in - lod), 2**lod)) + if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) + return img() + images_out = grow(combo_in, 2, resolution_log2 - 2) + + assert images_out.dtype == tf.as_dtype(dtype) + images_out = tf.identity(images_out, name='images_out') + return images_out + + +def D_paper( + images_in, # First input: Images [minibatch, channel, height, width]. + labels_in, # Second input: Labels [minibatch, label_size]. + num_channels = 1, # Number of input color channels. Overridden based on dataset. + resolution = 32, # Input resolution. Overridden based on dataset. + label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. + fmap_base = 8192, # Overall multiplier for the number of feature maps. + fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. + fmap_max = 512, # Maximum number of feature maps in any layer. + use_wscale = True, # Enable equalized learning rate? + mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. + dtype = 'float32', # Data type to use for activations and outputs. + fused_scale = True, # True = use fused conv2d + downscale2d, False = separate downscale2d layers. + structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically + is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. + **_kwargs): # Ignore unrecognized keyword args. + + resolution_log2 = int(np.log2(resolution)) + assert resolution == 2**resolution_log2 and resolution >= 4 + def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) + if structure is None: structure = 'linear' if is_template_graph else 'recursive' + act = leaky_relu + + images_in.set_shape([None, num_channels, resolution, resolution]) + labels_in.set_shape([None, label_size]) + images_in = tf.cast(images_in, dtype) + labels_in = tf.cast(labels_in, dtype) + lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) + scores_out = None + + # Building blocks. 
+ def fromrgb(x, res): # res = 2..resolution_log2 + with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): + return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, use_wscale=use_wscale))) + def block(x, res): # res = 2..resolution_log2 + with tf.variable_scope('%dx%d' % (2**res, 2**res)): + if res >= 3: # 8x8 and up + with tf.variable_scope('Conv0'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))) + if fused_scale: + with tf.variable_scope('Conv1_down'): + x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale))) + else: + with tf.variable_scope('Conv1'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale))) + x = downscale2d(x) + else: # 4x4 + if mbstd_group_size > 1: + x = minibatch_stddev_layer(x, mbstd_group_size) + with tf.variable_scope('Conv'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))) + with tf.variable_scope('Dense0'): + x = act(apply_bias(dense(x, fmaps=nf(res-2), use_wscale=use_wscale))) + with tf.variable_scope('Dense1'): + x = apply_bias(dense(x, fmaps=1, gain=1, use_wscale=use_wscale)) + return x + + # Linear structure: simple but inefficient. + if structure == 'linear': + img = images_in + x = fromrgb(img, resolution_log2) + for res in range(resolution_log2, 2, -1): + lod = resolution_log2 - res + x = block(x, res) + img = downscale2d(img) + y = fromrgb(img, res - 1) + with tf.variable_scope('Grow_lod%d' % lod): + x = lerp_clip(x, y, lod_in - lod) + scores_out = block(x, 2) + + # Recursive structure: complex but efficient. + if structure == 'recursive': + def grow(res, lod): + x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) + if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) + x = block(x(), res); y = lambda: x + if res > 2: y = cset(y, (lod_in > lod), lambda: lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) + return y() + scores_out = grow(2, resolution_log2 - 2) + + assert scores_out.dtype == tf.as_dtype(dtype) + scores_out = tf.identity(scores_out, name='scores_out') + return scores_out + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/training/networks_stylegan.py b/models/stylegan_tf_official/training/networks_stylegan.py new file mode 100644 index 0000000000000000000000000000000000000000..adc4b260f6f94570c793b0086280f757d2e19ad1 --- /dev/null +++ b/models/stylegan_tf_official/training/networks_stylegan.py @@ -0,0 +1,661 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Network architectures used in the StyleGAN paper.""" + +import numpy as np +import tensorflow as tf +import dnnlib +import dnnlib.tflib as tflib + +# NOTE: Do not import any application-specific modules here! +# Specify all network parameters as kwargs. + +#---------------------------------------------------------------------------- +# Primitive ops for manipulating 4D activation tensors. +# The gradients of these are not necessary efficient or even meaningful. 
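
The blur_filter used later in this file defaults to the separable binomial filter f = [1, 2, 1]; _blur2d() below expands it into a normalized 3x3 kernel and applies it depthwise, once per channel. A NumPy view of that kernel:

    import numpy as np

    f = np.array([1, 2, 1], dtype=np.float32)
    k = np.outer(f, f)        # 3x3 separable binomial kernel
    k /= k.sum()              # normalize=True keeps overall brightness unchanged
    print(k)
    # [[0.0625 0.125  0.0625]
    #  [0.125  0.25   0.125 ]
    #  [0.0625 0.125  0.0625]]
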
+ +def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1): + assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) + assert isinstance(stride, int) and stride >= 1 + + # Finalize filter kernel. + f = np.array(f, dtype=np.float32) + if f.ndim == 1: + f = f[:, np.newaxis] * f[np.newaxis, :] + assert f.ndim == 2 + if normalize: + f /= np.sum(f) + if flip: + f = f[::-1, ::-1] + f = f[:, :, np.newaxis, np.newaxis] + f = np.tile(f, [1, 1, int(x.shape[1]), 1]) + + # No-op => early exit. + if f.shape == (1, 1) and f[0,0] == 1: + return x + + # Convolve using depthwise_conv2d. + orig_dtype = x.dtype + x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16 + f = tf.constant(f, dtype=x.dtype, name='filter') + strides = [1, 1, stride, stride] + x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW') + x = tf.cast(x, orig_dtype) + return x + +def _upscale2d(x, factor=2, gain=1): + assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) + assert isinstance(factor, int) and factor >= 1 + + # Apply gain. + if gain != 1: + x *= gain + + # No-op => early exit. + if factor == 1: + return x + + # Upscale using tf.tile(). + s = x.shape + x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) + x = tf.tile(x, [1, 1, 1, factor, 1, factor]) + x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) + return x + +def _downscale2d(x, factor=2, gain=1): + assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) + assert isinstance(factor, int) and factor >= 1 + + # 2x2, float32 => downscale using _blur2d(). + if factor == 2 and x.dtype == tf.float32: + f = [np.sqrt(gain) / factor] * factor + return _blur2d(x, f=f, normalize=False, stride=factor) + + # Apply gain. + if gain != 1: + x *= gain + + # No-op => early exit. + if factor == 1: + return x + + # Large factor => downscale using tf.nn.avg_pool(). + # NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work. + ksize = [1, 1, factor, factor] + return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') + +#---------------------------------------------------------------------------- +# High-level ops for manipulating 4D activation tensors. +# The gradients of these are meant to be as efficient as possible. + +def blur2d(x, f=[1,2,1], normalize=True): + with tf.variable_scope('Blur2D'): + @tf.custom_gradient + def func(x): + y = _blur2d(x, f, normalize) + @tf.custom_gradient + def grad(dy): + dx = _blur2d(dy, f, normalize, flip=True) + return dx, lambda ddx: _blur2d(ddx, f, normalize) + return y, grad + return func(x) + +def upscale2d(x, factor=2): + with tf.variable_scope('Upscale2D'): + @tf.custom_gradient + def func(x): + y = _upscale2d(x, factor) + @tf.custom_gradient + def grad(dy): + dx = _downscale2d(dy, factor, gain=factor**2) + return dx, lambda ddx: _upscale2d(ddx, factor) + return y, grad + return func(x) + +def downscale2d(x, factor=2): + with tf.variable_scope('Downscale2D'): + @tf.custom_gradient + def func(x): + y = _downscale2d(x, factor) + @tf.custom_gradient + def grad(dy): + dx = _upscale2d(dy, factor, gain=1/factor**2) + return dx, lambda ddx: _downscale2d(ddx, factor) + return y, grad + return func(x) + +#---------------------------------------------------------------------------- +# Get/create weight tensor for a convolutional or fully-connected layer. 
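
get_weight() below implements equalized learning rate: rather than initializing weights at the He std gain / sqrt(fan_in), the variable is kept at roughly unit scale and the He constant is folded into a runtime multiplier, so every layer presents the optimizer with similarly scaled parameters. A small numeric sketch of the two code paths (fan_in and lrmul are example values; lrmul = 0.01 is what the mapping network passes in):

    import numpy as np

    fan_in = 3 * 3 * 512                       # e.g. a 3x3 conv reading 512 feature maps
    gain, lrmul = np.sqrt(2), 0.01
    he_std = gain / np.sqrt(fan_in)

    init_std, runtime_coef = 1.0 / lrmul, he_std * lrmul        # use_wscale=True
    alt_init_std, alt_runtime_coef = he_std / lrmul, lrmul      # use_wscale=False

    # Both paths yield the same effective weight std; only the trainable scale differs.
    assert np.isclose(init_std * runtime_coef, alt_init_std * alt_runtime_coef)
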
+ +def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1): + fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out] + he_std = gain / np.sqrt(fan_in) # He init + + # Equalized learning rate and custom learning rate multiplier. + if use_wscale: + init_std = 1.0 / lrmul + runtime_coef = he_std * lrmul + else: + init_std = he_std / lrmul + runtime_coef = lrmul + + # Create variable. + init = tf.initializers.random_normal(0, init_std) + return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef + +#---------------------------------------------------------------------------- +# Fully-connected layer. + +def dense(x, fmaps, **kwargs): + if len(x.shape) > 2: + x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) + w = get_weight([x.shape[1].value, fmaps], **kwargs) + w = tf.cast(w, x.dtype) + return tf.matmul(x, w) + +#---------------------------------------------------------------------------- +# Convolutional layer. + +def conv2d(x, fmaps, kernel, **kwargs): + assert kernel >= 1 and kernel % 2 == 1 + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) + w = tf.cast(w, x.dtype) + return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Fused convolution + scaling. +# Faster and uses less memory than performing the operations separately. + +def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs): + assert kernel >= 1 and kernel % 2 == 1 + assert fused_scale in [True, False, 'auto'] + if fused_scale == 'auto': + fused_scale = min(x.shape[2:]) * 2 >= 128 + + # Not fused => call the individual ops directly. + if not fused_scale: + return conv2d(upscale2d(x), fmaps, kernel, **kwargs) + + # Fused => perform both ops simultaneously using tf.nn.conv2d_transpose(). + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) + w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in] + w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') + w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) + w = tf.cast(w, x.dtype) + os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] + return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') + +def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs): + assert kernel >= 1 and kernel % 2 == 1 + assert fused_scale in [True, False, 'auto'] + if fused_scale == 'auto': + fused_scale = min(x.shape[2:]) >= 128 + + # Not fused => call the individual ops directly. + if not fused_scale: + return downscale2d(conv2d(x, fmaps, kernel, **kwargs)) + + # Fused => perform both ops simultaneously using tf.nn.conv2d(). + w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) + w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') + w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 + w = tf.cast(w, x.dtype) + return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') + +#---------------------------------------------------------------------------- +# Apply bias to the given activation tensor. 
+ +def apply_bias(x, lrmul=1): + b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul + b = tf.cast(b, x.dtype) + if len(x.shape) == 2: + return x + b + return x + tf.reshape(b, [1, -1, 1, 1]) + +#---------------------------------------------------------------------------- +# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16. + +def leaky_relu(x, alpha=0.2): + with tf.variable_scope('LeakyReLU'): + alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') + @tf.custom_gradient + def func(x): + y = tf.maximum(x, x * alpha) + @tf.custom_gradient + def grad(dy): + dx = tf.where(y >= 0, dy, dy * alpha) + return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha) + return y, grad + return func(x) + +#---------------------------------------------------------------------------- +# Pixelwise feature vector normalization. + +def pixel_norm(x, epsilon=1e-8): + with tf.variable_scope('PixelNorm'): + epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') + return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) + +#---------------------------------------------------------------------------- +# Instance normalization. + +def instance_norm(x, epsilon=1e-8): + assert len(x.shape) == 4 # NCHW + with tf.variable_scope('InstanceNorm'): + orig_dtype = x.dtype + x = tf.cast(x, tf.float32) + x -= tf.reduce_mean(x, axis=[2,3], keepdims=True) + epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') + x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon) + x = tf.cast(x, orig_dtype) + return x + +#---------------------------------------------------------------------------- +# Style modulation. + +def style_mod(x, dlatent, **kwargs): + with tf.variable_scope('StyleMod'): + style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs)) + style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2)) + return x * (style[:,0] + 1) + style[:,1] + +#---------------------------------------------------------------------------- +# Noise input. + +def apply_noise(x, noise_var=None, randomize_noise=True): + assert len(x.shape) == 4 # NCHW + with tf.variable_scope('Noise'): + if noise_var is None or randomize_noise: + noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype) + else: + noise = tf.cast(noise_var, x.dtype) + weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros()) + return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1]) + +#---------------------------------------------------------------------------- +# Minibatch standard deviation. + +def minibatch_stddev_layer(x, group_size=4, num_new_features=1): + with tf.variable_scope('MinibatchStddev'): + group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. + s = x.shape # [NCHW] Input shape. + y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c. + y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32. + y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group. + y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group. + y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group. + y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels. 
+ y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups + y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type. + y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels. + return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. + +#---------------------------------------------------------------------------- +# Style-based generator used in the StyleGAN paper. +# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below. + +def G_style( + latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. + labels_in, # Second input: Conditioning labels [minibatch, label_size]. + truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable. + truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable. + truncation_psi_val = None, # Value for truncation_psi to use during validation. + truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation. + dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable. + style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable. + is_training = False, # Network is under training? Enables and disables specific features. + is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi. + is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. + components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls. + **kwargs): # Arguments for sub-networks (G_mapping and G_synthesis). + + # Validate arguments. + assert not is_training or not is_validation + assert isinstance(components, dnnlib.EasyDict) + if is_validation: + truncation_psi = truncation_psi_val + truncation_cutoff = truncation_cutoff_val + if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1): + truncation_psi = None + if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0): + truncation_cutoff = None + if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1): + dlatent_avg_beta = None + if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0): + style_mixing_prob = None + + # Setup components. + if 'synthesis' not in components: + components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs) + num_layers = components.synthesis.input_shape[1] + dlatent_size = components.synthesis.input_shape[2] + if 'mapping' not in components: + components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs) + + # Setup variables. + lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False) + dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False) + + # Evaluate mapping network. + dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs) + + # Update moving average of W. 
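
The update that follows is a plain exponential moving average: with the default dlatent_avg_beta = 0.995, dlatent_avg is roughly the mean W over the last 1 / (1 - 0.995) = 200 minibatches, and it later serves as the centre point for the truncation trick. A scalar sketch of one update step, with stand-in values:

    beta = 0.995                              # dlatent_avg_beta default above
    dlatent_avg, batch_avg = 0.0, 1.0         # scalar stand-ins for the W vectors
    dlatent_avg = batch_avg + (dlatent_avg - batch_avg) * beta   # tflib.lerp(batch_avg, dlatent_avg, beta)
    print(dlatent_avg)                        # 0.005, i.e. W drifts slowly toward the batch mean
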
+ if dlatent_avg_beta is not None: + with tf.variable_scope('DlatentAvg'): + batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0) + update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta)) + with tf.control_dependencies([update_op]): + dlatents = tf.identity(dlatents) + + # Perform style mixing regularization. + if style_mixing_prob is not None: + with tf.name_scope('StyleMix'): + latents2 = tf.random_normal(tf.shape(latents_in)) + dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs) + layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] + cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2 + mixing_cutoff = tf.cond( + tf.random_uniform([], 0.0, 1.0) < style_mixing_prob, + lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32), + lambda: cur_layers) + dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2) + + # Apply truncation trick. + if truncation_psi is not None and truncation_cutoff is not None: + with tf.variable_scope('Truncation'): + layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] + ones = np.ones(layer_idx.shape, dtype=np.float32) + coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones) + dlatents = tflib.lerp(dlatent_avg, dlatents, coefs) + + # Evaluate synthesis network. + with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]): + images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs) + return tf.identity(images_out, name='images_out') + +#---------------------------------------------------------------------------- +# Mapping network used in the StyleGAN paper. + +def G_mapping( + latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. + labels_in, # Second input: Conditioning labels [minibatch, label_size]. + latent_size = 512, # Latent vector (Z) dimensionality. + label_size = 0, # Label dimensionality, 0 if no labels. + dlatent_size = 512, # Disentangled latent (W) dimensionality. + dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size]. + mapping_layers = 8, # Number of mapping layers. + mapping_fmaps = 512, # Number of activations in the mapping layers. + mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers. + mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'. + use_wscale = True, # Enable equalized learning rate? + normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers? + dtype = 'float32', # Data type to use for activations and outputs. + **_kwargs): # Ignore unrecognized keyword args. + + act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity] + + # Inputs. + latents_in.set_shape([None, latent_size]) + labels_in.set_shape([None, label_size]) + latents_in = tf.cast(latents_in, dtype) + labels_in = tf.cast(labels_in, dtype) + x = latents_in + + # Embed labels and concatenate them with latents. + if label_size: + with tf.variable_scope('LabelConcat'): + w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal()) + y = tf.matmul(labels_in, tf.cast(w, dtype)) + x = tf.concat([x, y], axis=1) + + # Normalize latents. + if normalize_latents: + x = pixel_norm(x) + + # Mapping layers. 
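
With the defaults above (latent_size = dlatent_size = 512, mapping_layers = 8), the loop below is an 8-layer MLP from Z to W, and the Broadcast step then tiles the result once per synthesis layer, e.g. 18 copies for a 1024x1024 generator. A quick NumPy shape check under those assumptions:

    import numpy as np

    minibatch, dlatent_size = 4, 512
    num_layers = 2 * int(np.log2(1024)) - 2            # 18 synthesis layers at 1024x1024
    w = np.zeros([minibatch, dlatent_size])            # stand-in for the last Dense layer's output
    dlatents = np.tile(w[:, np.newaxis], [1, num_layers, 1])
    assert dlatents.shape == (minibatch, num_layers, dlatent_size)
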
+ for layer_idx in range(mapping_layers): + with tf.variable_scope('Dense%d' % layer_idx): + fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps + x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul) + x = apply_bias(x, lrmul=mapping_lrmul) + x = act(x) + + # Broadcast. + if dlatent_broadcast is not None: + with tf.variable_scope('Broadcast'): + x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1]) + + # Output. + assert x.dtype == tf.as_dtype(dtype) + return tf.identity(x, name='dlatents_out') + +#---------------------------------------------------------------------------- +# Synthesis network used in the StyleGAN paper. + +def G_synthesis( + dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size]. + dlatent_size = 512, # Disentangled latent (W) dimensionality. + num_channels = 3, # Number of output color channels. + resolution = 1024, # Output resolution. + fmap_base = 8192, # Overall multiplier for the number of feature maps. + fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. + fmap_max = 512, # Maximum number of feature maps in any layer. + use_styles = True, # Enable style inputs? + const_input_layer = True, # First layer is a learned constant? + use_noise = True, # Enable noise inputs? + randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables. + nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu' + use_wscale = True, # Enable equalized learning rate? + use_pixel_norm = False, # Enable pixelwise feature vector normalization? + use_instance_norm = True, # Enable instance normalization? + dtype = 'float32', # Data type to use for activations and outputs. + fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. + blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. + structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. + is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. + force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior. + **_kwargs): # Ignore unrecognized keyword args. + + resolution_log2 = int(np.log2(resolution)) + assert resolution == 2**resolution_log2 and resolution >= 4 + def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) + def blur(x): return blur2d(x, blur_filter) if blur_filter else x + if is_template_graph: force_clean_graph = True + if force_clean_graph: randomize_noise = False + if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive' + act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] + num_layers = resolution_log2 * 2 - 2 + num_styles = num_layers if use_styles else 1 + images_out = None + + # Primary inputs. + dlatents_in.set_shape([None, num_styles, dlatent_size]) + dlatents_in = tf.cast(dlatents_in, dtype) + lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype) + + # Noise inputs. 
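
The loop below allocates one single-channel noise map per synthesis layer, two per resolution, stored as non-trainable variables so they can optionally be held fixed (randomize_noise=False). The per-layer resolutions follow directly from layer_idx // 2 + 2; for a 1024x1024 generator:

    resolutions = [2 ** (layer_idx // 2 + 2) for layer_idx in range(18)]
    print(resolutions)   # [4, 4, 8, 8, 16, 16, ..., 512, 512, 1024, 1024]
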
+ noise_inputs = [] + if use_noise: + for layer_idx in range(num_layers): + res = layer_idx // 2 + 2 + shape = [1, use_noise, 2**res, 2**res] + noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False)) + + # Things to do at the end of each layer. + def layer_epilogue(x, layer_idx): + if use_noise: + x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise) + x = apply_bias(x) + x = act(x) + if use_pixel_norm: + x = pixel_norm(x) + if use_instance_norm: + x = instance_norm(x) + if use_styles: + x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale) + return x + + # Early layers. + with tf.variable_scope('4x4'): + if const_input_layer: + with tf.variable_scope('Const'): + x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones()) + x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0) + else: + with tf.variable_scope('Dense'): + x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressing GAN + x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0) + with tf.variable_scope('Conv'): + x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1) + + # Building blocks for remaining layers. + def block(res, x): # res = 3..resolution_log2 + with tf.variable_scope('%dx%d' % (2**res, 2**res)): + with tf.variable_scope('Conv0_up'): + x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4) + with tf.variable_scope('Conv1'): + x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3) + return x + def torgb(res, x): # res = 2..resolution_log2 + lod = resolution_log2 - res + with tf.variable_scope('ToRGB_lod%d' % lod): + return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) + + # Fixed structure: simple and efficient, but does not support progressive growing. + if structure == 'fixed': + for res in range(3, resolution_log2 + 1): + x = block(res, x) + images_out = torgb(resolution_log2, x) + + # Linear structure: simple but inefficient. + if structure == 'linear': + images_out = torgb(2, x) + for res in range(3, resolution_log2 + 1): + lod = resolution_log2 - res + x = block(res, x) + img = torgb(res, x) + images_out = upscale2d(images_out) + with tf.variable_scope('Grow_lod%d' % lod): + images_out = tflib.lerp_clip(img, images_out, lod_in - lod) + + # Recursive structure: complex but efficient. + if structure == 'recursive': + def cset(cur_lambda, new_cond, new_lambda): + return lambda: tf.cond(new_cond, new_lambda, cur_lambda) + def grow(x, res, lod): + y = block(res, x) + img = lambda: upscale2d(torgb(res, y), 2**lod) + img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod)) + if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) + return img() + images_out = grow(x, 3, resolution_log2 - 3) + + assert images_out.dtype == tf.as_dtype(dtype) + return tf.identity(images_out, name='images_out') + +#---------------------------------------------------------------------------- +# Discriminator used in the StyleGAN paper. + +def D_basic( + images_in, # First input: Images [minibatch, channel, height, width]. + labels_in, # Second input: Labels [minibatch, label_size]. 
+ num_channels = 1, # Number of input color channels. Overridden based on dataset. + resolution = 32, # Input resolution. Overridden based on dataset. + label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. + fmap_base = 8192, # Overall multiplier for the number of feature maps. + fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. + fmap_max = 512, # Maximum number of feature maps in any layer. + nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', + use_wscale = True, # Enable equalized learning rate? + mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. + mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer. + dtype = 'float32', # Data type to use for activations and outputs. + fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. + blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. + structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. + is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. + **_kwargs): # Ignore unrecognized keyword args. + + resolution_log2 = int(np.log2(resolution)) + assert resolution == 2**resolution_log2 and resolution >= 4 + def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) + def blur(x): return blur2d(x, blur_filter) if blur_filter else x + if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive' + act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] + + images_in.set_shape([None, num_channels, resolution, resolution]) + labels_in.set_shape([None, label_size]) + images_in = tf.cast(images_in, dtype) + labels_in = tf.cast(labels_in, dtype) + lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) + scores_out = None + + # Building blocks. + def fromrgb(x, res): # res = 2..resolution_log2 + with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): + return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale))) + def block(x, res): # res = 2..resolution_log2 + with tf.variable_scope('%dx%d' % (2**res, 2**res)): + if res >= 3: # 8x8 and up + with tf.variable_scope('Conv0'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) + with tf.variable_scope('Conv1_down'): + x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale))) + else: # 4x4 + if mbstd_group_size > 1: + x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features) + with tf.variable_scope('Conv'): + x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) + with tf.variable_scope('Dense0'): + x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale))) + with tf.variable_scope('Dense1'): + x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale)) + return x + + # Fixed structure: simple and efficient, but does not support progressive growing. 
+ if structure == 'fixed': + x = fromrgb(images_in, resolution_log2) + for res in range(resolution_log2, 2, -1): + x = block(x, res) + scores_out = block(x, 2) + + # Linear structure: simple but inefficient. + if structure == 'linear': + img = images_in + x = fromrgb(img, resolution_log2) + for res in range(resolution_log2, 2, -1): + lod = resolution_log2 - res + x = block(x, res) + img = downscale2d(img) + y = fromrgb(img, res - 1) + with tf.variable_scope('Grow_lod%d' % lod): + x = tflib.lerp_clip(x, y, lod_in - lod) + scores_out = block(x, 2) + + # Recursive structure: complex but efficient. + if structure == 'recursive': + def cset(cur_lambda, new_cond, new_lambda): + return lambda: tf.cond(new_cond, new_lambda, cur_lambda) + def grow(res, lod): + x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) + if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) + x = block(x(), res); y = lambda: x + if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) + return y() + scores_out = grow(2, resolution_log2 - 2) + + # Label conditioning from "Which Training Methods for GANs do actually Converge?" + if label_size: + with tf.variable_scope('LabelSwitch'): + scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True) + + assert scores_out.dtype == tf.as_dtype(dtype) + scores_out = tf.identity(scores_out, name='scores_out') + return scores_out + +#---------------------------------------------------------------------------- diff --git a/models/stylegan_tf_official/training/training_loop.py b/models/stylegan_tf_official/training/training_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..d9ccb45b1a0321f1d938efa6a62229ffe396dcfe --- /dev/null +++ b/models/stylegan_tf_official/training/training_loop.py @@ -0,0 +1,278 @@ +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# This work is licensed under the Creative Commons Attribution-NonCommercial +# 4.0 International License. To view a copy of this license, visit +# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to +# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. + +"""Main training script.""" + +import os +import numpy as np +import tensorflow as tf +import dnnlib +import dnnlib.tflib as tflib +from dnnlib.tflib.autosummary import autosummary + +import config +import train +from training import dataset +from training import misc +from metrics import metric_base + +#---------------------------------------------------------------------------- +# Just-in-time processing of training images before feeding them to the networks. + +def process_reals(x, lod, mirror_augment, drange_data, drange_net): + with tf.name_scope('ProcessReals'): + with tf.name_scope('DynamicRange'): + x = tf.cast(x, tf.float32) + x = misc.adjust_dynamic_range(x, drange_data, drange_net) + if mirror_augment: + with tf.name_scope('MirrorAugment'): + s = tf.shape(x) + mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0) + mask = tf.tile(mask, [1, s[1], s[2], s[3]]) + x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3])) + with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail. 
+ s = tf.shape(x) + y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2]) + y = tf.reduce_mean(y, axis=[3, 5], keepdims=True) + y = tf.tile(y, [1, 1, 1, 2, 1, 2]) + y = tf.reshape(y, [-1, s[1], s[2], s[3]]) + x = tflib.lerp(x, y, lod - tf.floor(lod)) + with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks. + s = tf.shape(x) + factor = tf.cast(2 ** tf.floor(lod), tf.int32) + x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) + x = tf.tile(x, [1, 1, 1, factor, 1, factor]) + x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) + return x + +#---------------------------------------------------------------------------- +# Evaluate time-varying training parameters. + +def training_schedule( + cur_nimg, + training_set, + num_gpus, + lod_initial_resolution = 4, # Image resolution used at the beginning. + lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution. + lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers. + minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs. + minibatch_dict = {}, # Resolution-specific overrides. + max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU. + G_lrate_base = 0.001, # Learning rate for the generator. + G_lrate_dict = {}, # Resolution-specific overrides. + D_lrate_base = 0.001, # Learning rate for the discriminator. + D_lrate_dict = {}, # Resolution-specific overrides. + lrate_rampup_kimg = 0, # Duration of learning rate ramp-up. + tick_kimg_base = 160, # Default interval of progress snapshots. + tick_kimg_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:30, 1024:20}): # Resolution-specific overrides. + + # Initialize result dict. + s = dnnlib.EasyDict() + s.kimg = cur_nimg / 1000.0 + + # Training phase. + phase_dur = lod_training_kimg + lod_transition_kimg + phase_idx = int(np.floor(s.kimg / phase_dur)) if phase_dur > 0 else 0 + phase_kimg = s.kimg - phase_idx * phase_dur + + # Level-of-detail and resolution. + s.lod = training_set.resolution_log2 + s.lod -= np.floor(np.log2(lod_initial_resolution)) + s.lod -= phase_idx + if lod_transition_kimg > 0: + s.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg + s.lod = max(s.lod, 0.0) + s.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(s.lod))) + + # Minibatch size. + s.minibatch = minibatch_dict.get(s.resolution, minibatch_base) + s.minibatch -= s.minibatch % num_gpus + if s.resolution in max_minibatch_per_gpu: + s.minibatch = min(s.minibatch, max_minibatch_per_gpu[s.resolution] * num_gpus) + + # Learning rate. + s.G_lrate = G_lrate_dict.get(s.resolution, G_lrate_base) + s.D_lrate = D_lrate_dict.get(s.resolution, D_lrate_base) + if lrate_rampup_kimg > 0: + rampup = min(s.kimg / lrate_rampup_kimg, 1.0) + s.G_lrate *= rampup + s.D_lrate *= rampup + + # Other parameters. + s.tick_kimg = tick_kimg_dict.get(s.resolution, tick_kimg_base) + return s + +#---------------------------------------------------------------------------- +# Main training script. + +def training_loop( + submit_config, + G_args = {}, # Options for generator network. + D_args = {}, # Options for discriminator network. + G_opt_args = {}, # Options for generator optimizer. + D_opt_args = {}, # Options for discriminator optimizer. + G_loss_args = {}, # Options for generator loss. + D_loss_args = {}, # Options for discriminator loss. + dataset_args = {}, # Options for dataset.load_dataset(). 
+ sched_args = {}, # Options for train.TrainingSchedule. + grid_args = {}, # Options for train.setup_snapshot_image_grid(). + metric_arg_list = [], # Options for MetricGroup. + tf_config = {}, # Options for tflib.init_tf(). + G_smoothing_kimg = 10.0, # Half-life of the running average of generator weights. + D_repeats = 1, # How many times the discriminator is trained per G iteration. + minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters. + reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced? + total_kimg = 15000, # Total length of the training, measured in thousands of real images. + mirror_augment = False, # Enable mirror augment? + drange_net = [-1,1], # Dynamic range used when feeding image data to the networks. + image_snapshot_ticks = 1, # How often to export image snapshots? + network_snapshot_ticks = 10, # How often to export network snapshots? + save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file? + save_weight_histograms = False, # Include weight histograms in the tfevents file? + resume_run_id = None, # Run ID or network pkl to resume training from, None = start from scratch. + resume_snapshot = None, # Snapshot index to resume training from, None = autodetect. + resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule. + resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting. + + # Initialize dnnlib and TensorFlow. + ctx = dnnlib.RunContext(submit_config, train) + tflib.init_tf(tf_config) + + # Load training set. + training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **dataset_args) + + # Construct networks. + with tf.device('/gpu:0'): + if resume_run_id is not None: + network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot) + print('Loading networks from "%s"...' 
% network_pkl) + G, D, Gs = misc.load_pkl(network_pkl) + else: + print('Constructing networks...') + G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args) + D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args) + Gs = G.clone('Gs') + G.print_layers(); D.print_layers() + + print('Building TensorFlow graph...') + with tf.name_scope('Inputs'), tf.device('/cpu:0'): + lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[]) + lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[]) + minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[]) + minibatch_split = minibatch_in // submit_config.num_gpus + Gs_beta = 0.5 ** tf.div(tf.cast(minibatch_in, tf.float32), G_smoothing_kimg * 1000.0) if G_smoothing_kimg > 0.0 else 0.0 + + G_opt = tflib.Optimizer(name='TrainG', learning_rate=lrate_in, **G_opt_args) + D_opt = tflib.Optimizer(name='TrainD', learning_rate=lrate_in, **D_opt_args) + for gpu in range(submit_config.num_gpus): + with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu): + G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow') + D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow') + lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)] + reals, labels = training_set.get_minibatch_tf() + reals = process_reals(reals, lod_in, mirror_augment, training_set.dynamic_range, drange_net) + with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops): + G_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **G_loss_args) + with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops): + D_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals, labels=labels, **D_loss_args) + G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables) + D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables) + G_train_op = G_opt.apply_updates() + D_train_op = D_opt.apply_updates() + + Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta) + with tf.device('/gpu:0'): + try: + peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse() + except tf.errors.NotFoundError: + peak_gpu_mem_op = tf.constant(0) + + print('Setting up snapshot image grid...') + grid_size, grid_reals, grid_labels, grid_latents = misc.setup_snapshot_image_grid(G, training_set, **grid_args) + sched = training_schedule(cur_nimg=total_kimg*1000, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args) + grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus) + + print('Setting up run dir...') + misc.save_image_grid(grid_reals, os.path.join(submit_config.run_dir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size) + misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % resume_kimg), drange=drange_net, grid_size=grid_size) + summary_log = tf.summary.FileWriter(submit_config.run_dir) + if save_tf_graph: + summary_log.add_graph(tf.get_default_graph()) + if save_weight_histograms: + G.setup_weight_histograms(); D.setup_weight_histograms() + metrics = metric_base.MetricGroup(metric_arg_list) + + print('Training...\n') + ctx.update('', cur_epoch=resume_kimg, max_epoch=total_kimg) + 
maintenance_time = ctx.get_last_update_interval() + cur_nimg = int(resume_kimg * 1000) + cur_tick = 0 + tick_start_nimg = cur_nimg + prev_lod = -1.0 + while cur_nimg < total_kimg * 1000: + if ctx.should_stop(): break + + # Choose training parameters and configure training ops. + sched = training_schedule(cur_nimg=cur_nimg, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args) + training_set.configure(sched.minibatch // submit_config.num_gpus, sched.lod) + if reset_opt_for_new_lod: + if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod): + G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state() + prev_lod = sched.lod + + # Run training ops. + for _mb_repeat in range(minibatch_repeats): + for _D_repeat in range(D_repeats): + tflib.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch}) + cur_nimg += sched.minibatch + tflib.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch}) + + # Perform maintenance tasks once per tick. + done = (cur_nimg >= total_kimg * 1000) + if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done: + cur_tick += 1 + tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0 + tick_start_nimg = cur_nimg + tick_time = ctx.get_time_since_last_update() + total_time = ctx.get_time_since_start() + resume_time + + # Report progress. + print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %-6.1f gpumem %-4.1f' % ( + autosummary('Progress/tick', cur_tick), + autosummary('Progress/kimg', cur_nimg / 1000.0), + autosummary('Progress/lod', sched.lod), + autosummary('Progress/minibatch', sched.minibatch), + dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)), + autosummary('Timing/sec_per_tick', tick_time), + autosummary('Timing/sec_per_kimg', tick_time / tick_kimg), + autosummary('Timing/maintenance_sec', maintenance_time), + autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30))) + autosummary('Timing/total_hours', total_time / (60.0 * 60.0)) + autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0)) + + # Save snapshots. + if cur_tick % image_snapshot_ticks == 0 or done: + grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus) + misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size) + if cur_tick % network_snapshot_ticks == 0 or done or cur_tick == 1: + pkl = os.path.join(submit_config.run_dir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000)) + misc.save_pkl((G, D, Gs), pkl) + metrics.run(pkl, run_dir=submit_config.run_dir, num_gpus=submit_config.num_gpus, tf_config=tf_config) + + # Update summaries and RunContext. + metrics.update_autosummaries() + tflib.autosummary.save_summaries(summary_log, cur_nimg) + ctx.update('%.2f' % sched.lod, cur_epoch=cur_nimg // 1000, max_epoch=total_kimg) + maintenance_time = ctx.get_last_update_interval() - tick_time + + # Write final results. 
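
The snapshot pickles saved above, together with the network-final.pkl written just below, are what a later run picks up when resume_run_id is set; the lookup goes through the misc helpers added earlier in this diff. A minimal sketch of that path (the run id is illustrative, and the import assumes the script is run from the repository root like the other training code):

    from training import misc

    network_pkl = misc.locate_network_pkl(5)   # e.g. resolves to results/00005-*/network-*.pkl
    G, D, Gs = misc.load_pkl(network_pkl)      # same call the loop above uses when resuming
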
+ misc.save_pkl((G, D, Gs), os.path.join(submit_config.run_dir, 'network-final.pkl')) + summary_log.close() + + ctx.close() + +#---------------------------------------------------------------------------- diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f9b2aa91e4a9024db31af76e55e7a5b7fa4350b7 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +torch +numpy \ No newline at end of file diff --git a/torch_utils/__init__.py b/torch_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..939e7c6c8f94c4ea1141885c3c3295fe083b06aa --- /dev/null +++ b/torch_utils/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +# empty diff --git a/torch_utils/__pycache__/__init__.cpython-38.pyc b/torch_utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a11efcddf52e2ff8082a8d498e483439865d1d6 Binary files /dev/null and b/torch_utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/torch_utils/__pycache__/custom_ops.cpython-38.pyc b/torch_utils/__pycache__/custom_ops.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f65750788afd8e56227a0d81f56ef5461715bcce Binary files /dev/null and b/torch_utils/__pycache__/custom_ops.cpython-38.pyc differ diff --git a/torch_utils/__pycache__/misc.cpython-38.pyc b/torch_utils/__pycache__/misc.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe7cc8d2a27dfd92965ea052b00741d53ee9e0e9 Binary files /dev/null and b/torch_utils/__pycache__/misc.cpython-38.pyc differ diff --git a/torch_utils/__pycache__/persistence.cpython-38.pyc b/torch_utils/__pycache__/persistence.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da289d329f8a44582921fb4df7a4268f7721b6ab Binary files /dev/null and b/torch_utils/__pycache__/persistence.cpython-38.pyc differ diff --git a/torch_utils/custom_ops.py b/torch_utils/custom_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..dd7cc046e925f58602154be9bdf678ca9d76f59f --- /dev/null +++ b/torch_utils/custom_ops.py @@ -0,0 +1,157 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +import glob +import hashlib +import importlib +import os +import re +import shutil +import uuid + +import torch +import torch.utils.cpp_extension +from torch.utils.file_baton import FileBaton + +#---------------------------------------------------------------------------- +# Global options. + +verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full' + +#---------------------------------------------------------------------------- +# Internal helper funcs. 
+ +def _find_compiler_bindir(): + patterns = [ + 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin', + ] + for pattern in patterns: + matches = sorted(glob.glob(pattern)) + if len(matches): + return matches[-1] + return None + +#---------------------------------------------------------------------------- + +def _get_mangled_gpu_name(): + name = torch.cuda.get_device_name().lower() + out = [] + for c in name: + if re.match('[a-z0-9_-]+', c): + out.append(c) + else: + out.append('-') + return ''.join(out) + +#---------------------------------------------------------------------------- +# Main entry point for compiling and loading C++/CUDA plugins. + +_cached_plugins = dict() + +def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs): + assert verbosity in ['none', 'brief', 'full'] + if headers is None: + headers = [] + if source_dir is not None: + sources = [os.path.join(source_dir, fname) for fname in sources] + headers = [os.path.join(source_dir, fname) for fname in headers] + + # Already cached? + if module_name in _cached_plugins: + return _cached_plugins[module_name] + + # Print status. + if verbosity == 'full': + print(f'Setting up PyTorch plugin "{module_name}"...') + elif verbosity == 'brief': + print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True) + verbose_build = (verbosity == 'full') + + # Compile and load. + try: # pylint: disable=too-many-nested-blocks + # Make sure we can find the necessary compiler binaries. + if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0: + compiler_bindir = _find_compiler_bindir() + if compiler_bindir is None: + raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".') + os.environ['PATH'] += ';' + compiler_bindir + + # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either + # break the build or unnecessarily restrict what's available to nvcc. + # Unset it to let nvcc decide based on what's available on the + # machine. + os.environ['TORCH_CUDA_ARCH_LIST'] = '' + + # Incremental build md5sum trickery. Copies all the input source files + # into a cached build directory under a combined md5 digest of the input + # source files. Copying is done only if the combined digest has changed. + # This keeps input file timestamps and filenames the same as in previous + # extension builds, allowing for fast incremental rebuilds. + # + # This optimization is done only in case all the source files reside in + # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR + # environment variable is set (we take this as a signal that the user + # actually cares about this.) + # + # EDIT: We now do it regardless of TORCH_EXTENSIOS_DIR, in order to work + # around the *.cu dependency bug in ninja config. + # + all_source_files = sorted(sources + headers) + all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files) + if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ): + + # Compute combined hash digest for all source files. + hash_md5 = hashlib.md5() + for src in all_source_files: + with open(src, 'rb') as f: + hash_md5.update(f.read()) + + # Select cached build directory name. 
+ source_digest = hash_md5.hexdigest() + build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access + cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}') + + if not os.path.isdir(cached_build_dir): + tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}' + os.makedirs(tmpdir) + for src in all_source_files: + shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src))) + try: + os.replace(tmpdir, cached_build_dir) # atomic + except OSError: + # source directory already exists, delete tmpdir and its contents. + shutil.rmtree(tmpdir) + if not os.path.isdir(cached_build_dir): raise + + # Compile. + cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources] + torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir, + verbose=verbose_build, sources=cached_sources, **build_kwargs) + else: + torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs) + + # Load. + module = importlib.import_module(module_name) + + except: + if verbosity == 'brief': + print('Failed!') + raise + + # Print status and add to cache dict. + if verbosity == 'full': + print(f'Done setting up PyTorch plugin "{module_name}".') + elif verbosity == 'brief': + print('Done.') + _cached_plugins[module_name] = module + return module + +#---------------------------------------------------------------------------- diff --git a/torch_utils/misc.py b/torch_utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..335397dd1662d8f5bfd44e17899a00549867f4bc --- /dev/null +++ b/torch_utils/misc.py @@ -0,0 +1,266 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +import re +import contextlib +import numpy as np +import torch +import warnings +import dnnlib + +#---------------------------------------------------------------------------- +# Cached construction of constant tensors. Avoids CPU=>GPU copy when the +# same constant is used multiple times. + +_constant_cache = dict() + +def constant(value, shape=None, dtype=None, device=None, memory_format=None): + value = np.asarray(value) + if shape is not None: + shape = tuple(shape) + if dtype is None: + dtype = torch.get_default_dtype() + if device is None: + device = torch.device('cpu') + if memory_format is None: + memory_format = torch.contiguous_format + + key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format) + tensor = _constant_cache.get(key, None) + if tensor is None: + tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) + if shape is not None: + tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) + tensor = tensor.contiguous(memory_format=memory_format) + _constant_cache[key] = tensor + return tensor + +#---------------------------------------------------------------------------- +# Replace NaN/Inf with specified numerical values. 
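For reference, a usage sketch of custom_ops.get_plugin() defined above; it mirrors the call made from torch_utils/ops/bias_act.py later in this diff and assumes a working CUDA toolchain:

import os
from torch_utils import custom_ops

_plugin = custom_ops.get_plugin(
    module_name='bias_act_plugin',
    sources=['bias_act.cpp', 'bias_act.cu'],
    headers=['bias_act.h'],
    source_dir=os.path.dirname(__file__),   # directory holding the C++/CUDA sources
    extra_cuda_cflags=['--use_fast_math'],   # forwarded to torch.utils.cpp_extension.load()
)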
+ +try: + nan_to_num = torch.nan_to_num # 1.8.0a0 +except AttributeError: + def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin + assert isinstance(input, torch.Tensor) + if posinf is None: + posinf = torch.finfo(input.dtype).max + if neginf is None: + neginf = torch.finfo(input.dtype).min + assert nan == 0 + return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out) + +#---------------------------------------------------------------------------- +# Symbolic assert. + +try: + symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access +except AttributeError: + symbolic_assert = torch.Assert # 1.7.0 + +#---------------------------------------------------------------------------- +# Context manager to temporarily suppress known warnings in torch.jit.trace(). +# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672 + +@contextlib.contextmanager +def suppress_tracer_warnings(): + flt = ('ignore', None, torch.jit.TracerWarning, None, 0) + warnings.filters.insert(0, flt) + yield + warnings.filters.remove(flt) + +#---------------------------------------------------------------------------- +# Assert that the shape of a tensor matches the given list of integers. +# None indicates that the size of a dimension is allowed to vary. +# Performs symbolic assertion when used in torch.jit.trace(). + +def assert_shape(tensor, ref_shape): + if tensor.ndim != len(ref_shape): + raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}') + for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)): + if ref_size is None: + pass + elif isinstance(ref_size, torch.Tensor): + with suppress_tracer_warnings(): # as_tensor results are registered as constants + symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}') + elif isinstance(size, torch.Tensor): + with suppress_tracer_warnings(): # as_tensor results are registered as constants + symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}') + elif size != ref_size: + raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}') + +#---------------------------------------------------------------------------- +# Function decorator that calls torch.autograd.profiler.record_function(). + +def profiled_function(fn): + def decorator(*args, **kwargs): + with torch.autograd.profiler.record_function(fn.__name__): + return fn(*args, **kwargs) + decorator.__name__ = fn.__name__ + return decorator + +#---------------------------------------------------------------------------- +# Sampler for torch.utils.data.DataLoader that loops over the dataset +# indefinitely, shuffling items as it goes. 
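A short usage sketch of misc.assert_shape() defined above; the tensor and reference shape are illustrative:

import torch
from torch_utils import misc

x = torch.zeros(8, 3, 256, 256)
misc.assert_shape(x, [None, 3, 256, 256])   # None leaves the batch dimension unconstrained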
+ +class InfiniteSampler(torch.utils.data.Sampler): + def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5): + assert len(dataset) > 0 + assert num_replicas > 0 + assert 0 <= rank < num_replicas + assert 0 <= window_size <= 1 + super().__init__(dataset) + self.dataset = dataset + self.rank = rank + self.num_replicas = num_replicas + self.shuffle = shuffle + self.seed = seed + self.window_size = window_size + + def __iter__(self): + order = np.arange(len(self.dataset)) + rnd = None + window = 0 + if self.shuffle: + rnd = np.random.RandomState(self.seed) + rnd.shuffle(order) + window = int(np.rint(order.size * self.window_size)) + + idx = 0 + while True: + i = idx % order.size + if idx % self.num_replicas == self.rank: + yield order[i] + if window >= 2: + j = (i - rnd.randint(window)) % order.size + order[i], order[j] = order[j], order[i] + idx += 1 + +#---------------------------------------------------------------------------- +# Utilities for operating with torch.nn.Module parameters and buffers. + +def params_and_buffers(module): + assert isinstance(module, torch.nn.Module) + return list(module.parameters()) + list(module.buffers()) + +def named_params_and_buffers(module): + assert isinstance(module, torch.nn.Module) + return list(module.named_parameters()) + list(module.named_buffers()) + +def copy_params_and_buffers(src_module, dst_module, require_all=False): + assert isinstance(src_module, torch.nn.Module) + assert isinstance(dst_module, torch.nn.Module) + src_tensors = dict(named_params_and_buffers(src_module)) + for name, tensor in named_params_and_buffers(dst_module): + assert (name in src_tensors) or (not require_all) + if name in src_tensors: + tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad) + +#---------------------------------------------------------------------------- +# Context manager for easily enabling/disabling DistributedDataParallel +# synchronization. + +@contextlib.contextmanager +def ddp_sync(module, sync): + assert isinstance(module, torch.nn.Module) + if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel): + yield + else: + with module.no_sync(): + yield + +#---------------------------------------------------------------------------- +# Check DistributedDataParallel consistency across processes. + +def check_ddp_consistency(module, ignore_regex=None): + assert isinstance(module, torch.nn.Module) + for name, tensor in named_params_and_buffers(module): + fullname = type(module).__name__ + '.' + name + if ignore_regex is not None and re.fullmatch(ignore_regex, fullname): + continue + tensor = tensor.detach() + if tensor.is_floating_point(): + tensor = nan_to_num(tensor) + other = tensor.clone() + torch.distributed.broadcast(tensor=other, src=0) + assert (tensor == other).all(), fullname + +#---------------------------------------------------------------------------- +# Print summary table of module hierarchy. + +def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True): + assert isinstance(module, torch.nn.Module) + assert not isinstance(module, torch.jit.ScriptModule) + assert isinstance(inputs, (tuple, list)) + + # Register hooks. 
+ entries = [] + nesting = [0] + def pre_hook(_mod, _inputs): + nesting[0] += 1 + def post_hook(mod, _inputs, outputs): + nesting[0] -= 1 + if nesting[0] <= max_nesting: + outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs] + outputs = [t for t in outputs if isinstance(t, torch.Tensor)] + entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs)) + hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()] + hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()] + + # Run module. + outputs = module(*inputs) + for hook in hooks: + hook.remove() + + # Identify unique outputs, parameters, and buffers. + tensors_seen = set() + for e in entries: + e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen] + e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen] + e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen] + tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs} + + # Filter out redundant entries. + if skip_redundant: + entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)] + + # Construct table. + rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']] + rows += [['---'] * len(rows[0])] + param_total = 0 + buffer_total = 0 + submodule_names = {mod: name for name, mod in module.named_modules()} + for e in entries: + name = '' if e.mod is module else submodule_names[e.mod] + param_size = sum(t.numel() for t in e.unique_params) + buffer_size = sum(t.numel() for t in e.unique_buffers) + output_shapes = [str(list(t.shape)) for t in e.outputs] + output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs] + rows += [[ + name + (':0' if len(e.outputs) >= 2 else ''), + str(param_size) if param_size else '-', + str(buffer_size) if buffer_size else '-', + (output_shapes + ['-'])[0], + (output_dtypes + ['-'])[0], + ]] + for idx in range(1, len(e.outputs)): + rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]] + param_total += param_size + buffer_total += buffer_size + rows += [['---'] * len(rows[0])] + rows += [['Total', str(param_total), str(buffer_total), '-', '-']] + + # Print table. + widths = [max(len(cell) for cell in column) for column in zip(*rows)] + print() + for row in rows: + print(' '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths))) + print() + return outputs + +#---------------------------------------------------------------------------- diff --git a/torch_utils/ops/__init__.py b/torch_utils/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..939e7c6c8f94c4ea1141885c3c3295fe083b06aa --- /dev/null +++ b/torch_utils/ops/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. 
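A usage sketch of misc.InfiniteSampler defined in torch_utils/misc.py above, with an illustrative in-memory dataset; the sampler never raises StopIteration, so the caller decides when to stop iterating:

import torch
from torch_utils import misc

dataset = torch.utils.data.TensorDataset(torch.randn(100, 3, 64, 64))
sampler = misc.InfiniteSampler(dataset, rank=0, num_replicas=1, shuffle=True, seed=0)
loader = iter(torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=8))
images, = next(loader)   # keep calling next(); the sampler loops over the data indefinitely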
+ +# empty diff --git a/torch_utils/ops/__pycache__/__init__.cpython-38.pyc b/torch_utils/ops/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b28edc47550b5d8935027477578e9161c451816 Binary files /dev/null and b/torch_utils/ops/__pycache__/__init__.cpython-38.pyc differ diff --git a/torch_utils/ops/__pycache__/bias_act.cpython-38.pyc b/torch_utils/ops/__pycache__/bias_act.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ea8eb092377774def79e5e85602fdd39e96f24d Binary files /dev/null and b/torch_utils/ops/__pycache__/bias_act.cpython-38.pyc differ diff --git a/torch_utils/ops/__pycache__/conv2d_gradfix.cpython-38.pyc b/torch_utils/ops/__pycache__/conv2d_gradfix.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b7fef4d7acae5b8434b1ec20b5ec6e807e00c09 Binary files /dev/null and b/torch_utils/ops/__pycache__/conv2d_gradfix.cpython-38.pyc differ diff --git a/torch_utils/ops/__pycache__/conv2d_resample.cpython-38.pyc b/torch_utils/ops/__pycache__/conv2d_resample.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e64cb27ff87855d6e641f14f0c37030b3660278f Binary files /dev/null and b/torch_utils/ops/__pycache__/conv2d_resample.cpython-38.pyc differ diff --git a/torch_utils/ops/__pycache__/filtered_lrelu.cpython-38.pyc b/torch_utils/ops/__pycache__/filtered_lrelu.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec703dd0dc58e50f476858742e72390fb11a1fbe Binary files /dev/null and b/torch_utils/ops/__pycache__/filtered_lrelu.cpython-38.pyc differ diff --git a/torch_utils/ops/__pycache__/fma.cpython-38.pyc b/torch_utils/ops/__pycache__/fma.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c92bc932a00d61833eb072cd513cb92533ca157a Binary files /dev/null and b/torch_utils/ops/__pycache__/fma.cpython-38.pyc differ diff --git a/torch_utils/ops/__pycache__/upfirdn2d.cpython-38.pyc b/torch_utils/ops/__pycache__/upfirdn2d.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3921b2fdc3d60f2a5da11ebcfa2b5199d5f8f402 Binary files /dev/null and b/torch_utils/ops/__pycache__/upfirdn2d.cpython-38.pyc differ diff --git a/torch_utils/ops/bias_act.cpp b/torch_utils/ops/bias_act.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3adaeee2ae44e96655d354c2bdfb81de8ebfe6c6 --- /dev/null +++ b/torch_utils/ops/bias_act.cpp @@ -0,0 +1,99 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. 
+
+#include <torch/extension.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+#include "bias_act.h"
+
+//------------------------------------------------------------------------
+
+static bool has_same_layout(torch::Tensor x, torch::Tensor y)
+{
+    if (x.dim() != y.dim())
+        return false;
+    for (int64_t i = 0; i < x.dim(); i++)
+    {
+        if (x.size(i) != y.size(i))
+            return false;
+        if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
+            return false;
+    }
+    return true;
+}
+
+//------------------------------------------------------------------------
+
+static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
+{
+    // Validate arguments.
+    TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
+    TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
+    TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
+    TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
+    TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x");
+    TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
+    TORCH_CHECK(b.dim() == 1, "b must have rank 1");
+    TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
+    TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
+    TORCH_CHECK(grad >= 0, "grad must be non-negative");
+
+    // Validate layout.
+    TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
+    TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
+    TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
+    TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
+    TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");
+
+    // Create output tensor.
+    const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
+    torch::Tensor y = torch::empty_like(x);
+    TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");
+
+    // Initialize CUDA kernel parameters.
+    bias_act_kernel_params p;
+    p.x = x.data_ptr();
+    p.b = (b.numel()) ? b.data_ptr() : NULL;
+    p.xref = (xref.numel()) ? xref.data_ptr() : NULL;
+    p.yref = (yref.numel()) ? yref.data_ptr() : NULL;
+    p.dy = (dy.numel()) ? dy.data_ptr() : NULL;
+    p.y = y.data_ptr();
+    p.grad = grad;
+    p.act = act;
+    p.alpha = alpha;
+    p.gain = gain;
+    p.clamp = clamp;
+    p.sizeX = (int)x.numel();
+    p.sizeB = (int)b.numel();
+    p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;
+
+    // Choose CUDA kernel.
+    void* kernel;
+    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
+    {
+        kernel = choose_bias_act_kernel<scalar_t>(p);
+    });
+    TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");
+
+    // Launch CUDA kernel.
+    p.loopX = 4;
+    int blockSize = 4 * 32;
+    int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
+    void* args[] = {&p};
+    AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
+    return y;
+}
+
+//------------------------------------------------------------------------
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+{
+    m.def("bias_act", &bias_act);
+}
+
+//------------------------------------------------------------------------
diff --git a/torch_utils/ops/bias_act.cu b/torch_utils/ops/bias_act.cu
new file mode 100644
index 0000000000000000000000000000000000000000..ed1d16f14eadd1344939e074ace1375cfd936cea
--- /dev/null
+++ b/torch_utils/ops/bias_act.cu
@@ -0,0 +1,173 @@
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// NVIDIA CORPORATION and its licensors retain all intellectual property
+// and proprietary rights in and to this software, related documentation
+// and any modifications thereto. Any use, reproduction, disclosure or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+#include <c10/util/Half.h>
+#include "bias_act.h"
+
+//------------------------------------------------------------------------
+// Helpers.
+
+template <class T> struct InternalType;
+template <> struct InternalType<double> { typedef double scalar_t; };
+template <> struct InternalType<float> { typedef float scalar_t; };
+template <> struct InternalType<c10::Half> { typedef float scalar_t; };
+
+//------------------------------------------------------------------------
+// CUDA kernel.
+
+template <class T, int A>
+__global__ void bias_act_kernel(bias_act_kernel_params p)
+{
+    typedef typename InternalType<T>::scalar_t scalar_t;
+    int G = p.grad;
+    scalar_t alpha = (scalar_t)p.alpha;
+    scalar_t gain = (scalar_t)p.gain;
+    scalar_t clamp = (scalar_t)p.clamp;
+    scalar_t one = (scalar_t)1;
+    scalar_t two = (scalar_t)2;
+    scalar_t expRange = (scalar_t)80;
+    scalar_t halfExpRange = (scalar_t)40;
+    scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946;
+    scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717;
+
+    // Loop over elements.
+    int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
+    for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
+    {
+        // Load.
+        scalar_t x = (scalar_t)((const T*)p.x)[xi];
+        scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0;
+        scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0;
+        scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0;
+        scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one;
+        scalar_t yy = (gain != 0) ? yref / gain : 0;
+        scalar_t y = 0;
+
+        // Apply bias.
+        ((G == 0) ? x : xref) += b;
+
+        // linear
+        if (A == 1)
+        {
+            if (G == 0) y = x;
+            if (G == 1) y = x;
+        }
+
+        // relu
+        if (A == 2)
+        {
+            if (G == 0) y = (x > 0) ? x : 0;
+            if (G == 1) y = (yy > 0) ? x : 0;
+        }
+
+        // lrelu
+        if (A == 3)
+        {
+            if (G == 0) y = (x > 0) ? x : x * alpha;
+            if (G == 1) y = (yy > 0) ? x : x * alpha;
+        }
+
+        // tanh
+        if (A == 4)
+        {
+            if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); }
+            if (G == 1) y = x * (one - yy * yy);
+            if (G == 2) y = x * (one - yy * yy) * (-two * yy);
+        }
+
+        // sigmoid
+        if (A == 5)
+        {
+            if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one);
+            if (G == 1) y = x * yy * (one - yy);
+            if (G == 2) y = x * yy * (one - yy) * (one - two * yy);
+        }
+
+        // elu
+        if (A == 6)
+        {
+            if (G == 0) y = (x >= 0) ? x : exp(x) - one;
+            if (G == 1) y = (yy >= 0) ? x : x * (yy + one);
+            if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one);
+        }
+
+        // selu
+        if (A == 7)
+        {
+            if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one);
+            if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha);
+            if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha);
+        }
+
+        // softplus
+        if (A == 8)
+        {
+            if (G == 0) y = (x > expRange) ? x : log(exp(x) + one);
+            if (G == 1) y = x * (one - exp(-yy));
+            if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); }
+        }
+
+        // swish
+        if (A == 9)
+        {
+            if (G == 0)
+                y = (x < -expRange) ? 0 : x / (exp(-x) + one);
+            else
+            {
+                scalar_t c = exp(xref);
+                scalar_t d = c + one;
+                if (G == 1)
+                    y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d);
+                else
+                    y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d);
+                yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain;
+            }
+        }
+
+        // Apply gain.
+        y *= gain * dy;
+
+        // Clamp.
+        if (clamp >= 0)
+        {
+            if (G == 0)
+                y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp;
+            else
+                y = (yref > -clamp & yref < clamp) ? y : 0;
+        }
+
+        // Store.
+        ((T*)p.y)[xi] = (T)y;
+    }
+}
+
+//------------------------------------------------------------------------
+// CUDA kernel selection.
+
+template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p)
+{
+    if (p.act == 1) return (void*)bias_act_kernel<T, 1>;
+    if (p.act == 2) return (void*)bias_act_kernel<T, 2>;
+    if (p.act == 3) return (void*)bias_act_kernel<T, 3>;
+    if (p.act == 4) return (void*)bias_act_kernel<T, 4>;
+    if (p.act == 5) return (void*)bias_act_kernel<T, 5>;
+    if (p.act == 6) return (void*)bias_act_kernel<T, 6>;
+    if (p.act == 7) return (void*)bias_act_kernel<T, 7>;
+    if (p.act == 8) return (void*)bias_act_kernel<T, 8>;
+    if (p.act == 9) return (void*)bias_act_kernel<T, 9>;
+    return NULL;
+}
+
+//------------------------------------------------------------------------
+// Template specializations.
+
+template void* choose_bias_act_kernel<double> (const bias_act_kernel_params& p);
+template void* choose_bias_act_kernel<float> (const bias_act_kernel_params& p);
+template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p);
+
+//------------------------------------------------------------------------
diff --git a/torch_utils/ops/bias_act.h b/torch_utils/ops/bias_act.h
new file mode 100644
index 0000000000000000000000000000000000000000..60b81c6058d54638a6d74a13046fa388442d767d
--- /dev/null
+++ b/torch_utils/ops/bias_act.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// NVIDIA CORPORATION and its licensors retain all intellectual property
+// and proprietary rights in and to this software, related documentation
+// and any modifications thereto. Any use, reproduction, disclosure or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+//------------------------------------------------------------------------
+// CUDA kernel parameters.
+ +struct bias_act_kernel_params +{ + const void* x; // [sizeX] + const void* b; // [sizeB] or NULL + const void* xref; // [sizeX] or NULL + const void* yref; // [sizeX] or NULL + const void* dy; // [sizeX] or NULL + void* y; // [sizeX] + + int grad; + int act; + float alpha; + float gain; + float clamp; + + int sizeX; + int sizeB; + int stepB; + int loopX; +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template void* choose_bias_act_kernel(const bias_act_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/torch_utils/ops/bias_act.py b/torch_utils/ops/bias_act.py new file mode 100644 index 0000000000000000000000000000000000000000..5c485c0027570decab26f0b6602a363a432b851f --- /dev/null +++ b/torch_utils/ops/bias_act.py @@ -0,0 +1,209 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Custom PyTorch ops for efficient bias and activation.""" + +import os +import numpy as np +import torch +import dnnlib + +from .. import custom_ops +from .. import misc + +#---------------------------------------------------------------------------- + +activation_funcs = { + 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), + 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), + 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), + 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), + 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), + 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), + 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), + 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), + 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), +} + +#---------------------------------------------------------------------------- + +_plugin = None +_null_tensor = torch.empty([0]) + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='bias_act_plugin', + sources=['bias_act.cpp', 'bias_act.cu'], + headers=['bias_act.h'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], + ) + return True + +#---------------------------------------------------------------------------- + +def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'): + r"""Fused bias and activation function. 
+ + Adds bias `b` to activation tensor `x`, evaluates activation function `act`, + and scales the result by `gain`. Each of the steps is optional. In most cases, + the fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports first and second order gradients, + but not third order gradients. + + Args: + x: Input activation tensor. Can be of any shape. + b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type + as `x`. The shape must be known, and it must match the dimension of `x` + corresponding to `dim`. + dim: The dimension in `x` corresponding to the elements of `b`. + The value of `dim` is ignored if `b` is not specified. + act: Name of the activation function to evaluate, or `"linear"` to disable. + Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. + See `activation_funcs` for a full list. `None` is not allowed. + alpha: Shape parameter for the activation function, or `None` to use the default. + gain: Scaling factor for the output tensor, or `None` to use default. + See `activation_funcs` for the default scaling of each activation function. + If unsure, consider specifying 1. + clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable + the clamping (default). + impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). + + Returns: + Tensor of the same shape and datatype as `x`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b) + return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None): + """Slow reference implementation of `bias_act()` using standard TensorFlow ops. + """ + assert isinstance(x, torch.Tensor) + assert clamp is None or clamp >= 0 + spec = activation_funcs[act] + alpha = float(alpha if alpha is not None else spec.def_alpha) + gain = float(gain if gain is not None else spec.def_gain) + clamp = float(clamp if clamp is not None else -1) + + # Add bias. + if b is not None: + assert isinstance(b, torch.Tensor) and b.ndim == 1 + assert 0 <= dim < x.ndim + assert b.shape[0] == x.shape[dim] + x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) + + # Evaluate activation function. + alpha = float(alpha) + x = spec.func(x, alpha=alpha) + + # Scale by gain. + gain = float(gain) + if gain != 1: + x = x * gain + + # Clamp. + if clamp >= 0: + x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type + return x + +#---------------------------------------------------------------------------- + +_bias_act_cuda_cache = dict() + +def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None): + """Fast CUDA implementation of `bias_act()` using custom ops. + """ + # Parse arguments. + assert clamp is None or clamp >= 0 + spec = activation_funcs[act] + alpha = float(alpha if alpha is not None else spec.def_alpha) + gain = float(gain if gain is not None else spec.def_gain) + clamp = float(clamp if clamp is not None else -1) + + # Lookup from cache. + key = (dim, act, alpha, gain, clamp) + if key in _bias_act_cuda_cache: + return _bias_act_cuda_cache[key] + + # Forward op. 
+ class BiasActCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, b): # pylint: disable=arguments-differ + ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format + x = x.contiguous(memory_format=ctx.memory_format) + b = b.contiguous() if b is not None else _null_tensor + y = x + if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor: + y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp) + ctx.save_for_backward( + x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, + b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, + y if 'y' in spec.ref else _null_tensor) + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + dy = dy.contiguous(memory_format=ctx.memory_format) + x, b, y = ctx.saved_tensors + dx = None + db = None + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + dx = dy + if act != 'linear' or gain != 1 or clamp >= 0: + dx = BiasActCudaGrad.apply(dy, x, b, y) + + if ctx.needs_input_grad[1]: + db = dx.sum([i for i in range(dx.ndim) if i != dim]) + + return dx, db + + # Backward op. + class BiasActCudaGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ + ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format + dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp) + ctx.save_for_backward( + dy if spec.has_2nd_grad else _null_tensor, + x, b, y) + return dx + + @staticmethod + def backward(ctx, d_dx): # pylint: disable=arguments-differ + d_dx = d_dx.contiguous(memory_format=ctx.memory_format) + dy, x, b, y = ctx.saved_tensors + d_dy = None + d_x = None + d_b = None + d_y = None + + if ctx.needs_input_grad[0]: + d_dy = BiasActCudaGrad.apply(d_dx, x, b, y) + + if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]): + d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp) + + if spec.has_2nd_grad and ctx.needs_input_grad[2]: + d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim]) + + return d_dy, d_x, d_b, d_y + + # Add to cache. + _bias_act_cuda_cache[key] = BiasActCuda + return BiasActCuda + +#---------------------------------------------------------------------------- diff --git a/torch_utils/ops/conv2d_gradfix.py b/torch_utils/ops/conv2d_gradfix.py new file mode 100644 index 0000000000000000000000000000000000000000..388778fa971d7bc5c64b5fd6c0e5492863ee1c5f --- /dev/null +++ b/torch_utils/ops/conv2d_gradfix.py @@ -0,0 +1,198 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. 
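A usage sketch of the bias_act() op from torch_utils/ops/bias_act.py above; the shapes and the 'lrelu' choice are illustrative, and impl='ref' keeps the example runnable without the compiled CUDA plugin:

import torch
from torch_utils.ops import bias_act

x = torch.randn(4, 512, 16, 16)      # NCHW activations
b = torch.zeros(512)                 # one bias value per channel (dim=1)
y = bias_act.bias_act(x, b, dim=1, act='lrelu', impl='ref')
assert y.shape == x.shape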
+ +"""Custom replacement for `torch.nn.functional.conv2d` that supports +arbitrarily high order gradients with zero performance penalty.""" + +import contextlib +import torch + +# pylint: disable=redefined-builtin +# pylint: disable=arguments-differ +# pylint: disable=protected-access + +#---------------------------------------------------------------------------- + +enabled = False # Enable the custom op by setting this to true. +weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights. + +@contextlib.contextmanager +def no_weight_gradients(disable=True): + global weight_gradients_disabled + old = weight_gradients_disabled + if disable: + weight_gradients_disabled = True + yield + weight_gradients_disabled = old + +#---------------------------------------------------------------------------- + +def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + if _should_use_custom_op(input): + return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) + return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) + +def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): + if _should_use_custom_op(input): + return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) + return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) + +#---------------------------------------------------------------------------- + +def _should_use_custom_op(input): + assert isinstance(input, torch.Tensor) + if (not enabled) or (not torch.backends.cudnn.enabled): + return False + if input.device.type != 'cuda': + return False + return True + +def _tuple_of_ints(xs, ndim): + xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim + assert len(xs) == ndim + assert all(isinstance(x, int) for x in xs) + return xs + +#---------------------------------------------------------------------------- + +_conv2d_gradfix_cache = dict() +_null_tensor = torch.empty([0]) + +def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): + # Parse arguments. + ndim = 2 + weight_shape = tuple(weight_shape) + stride = _tuple_of_ints(stride, ndim) + padding = _tuple_of_ints(padding, ndim) + output_padding = _tuple_of_ints(output_padding, ndim) + dilation = _tuple_of_ints(dilation, ndim) + + # Lookup from cache. + key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) + if key in _conv2d_gradfix_cache: + return _conv2d_gradfix_cache[key] + + # Validate arguments. + assert groups >= 1 + assert len(weight_shape) == ndim + 2 + assert all(stride[i] >= 1 for i in range(ndim)) + assert all(padding[i] >= 0 for i in range(ndim)) + assert all(dilation[i] >= 0 for i in range(ndim)) + if not transpose: + assert all(output_padding[i] == 0 for i in range(ndim)) + else: # transpose + assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim)) + + # Helpers. 
+ common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) + def calc_output_padding(input_shape, output_shape): + if transpose: + return [0, 0] + return [ + input_shape[i + 2] + - (output_shape[i + 2] - 1) * stride[i] + - (1 - 2 * padding[i]) + - dilation[i] * (weight_shape[i + 2] - 1) + for i in range(ndim) + ] + + # Forward & backward. + class Conv2d(torch.autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias): + assert weight.shape == weight_shape + ctx.save_for_backward( + input if weight.requires_grad else _null_tensor, + weight if input.requires_grad else _null_tensor, + ) + ctx.input_shape = input.shape + + # Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere). + if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0): + a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1]) + b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1) + c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2) + c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1) + c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3) + return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) + + # General case => cuDNN. + if transpose: + return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) + return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + input_shape = ctx.input_shape + grad_input = None + grad_weight = None + grad_bias = None + + if ctx.needs_input_grad[0]: + p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape) + op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) + grad_input = op.apply(grad_output, weight, None) + assert grad_input.shape == input_shape + + if ctx.needs_input_grad[1] and not weight_gradients_disabled: + grad_weight = Conv2dGradWeight.apply(grad_output, input) + assert grad_weight.shape == weight_shape + + if ctx.needs_input_grad[2]: + grad_bias = grad_output.sum([0, 2, 3]) + + return grad_input, grad_weight, grad_bias + + # Gradient with respect to the weights. + class Conv2dGradWeight(torch.autograd.Function): + @staticmethod + def forward(ctx, grad_output, input): + ctx.save_for_backward( + grad_output if input.requires_grad else _null_tensor, + input if grad_output.requires_grad else _null_tensor, + ) + ctx.grad_output_shape = grad_output.shape + ctx.input_shape = input.shape + + # Simple 1x1 convolution => cuBLAS (on both Volta and Ampere). + if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0): + a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) + b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) + c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape) + return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) + + # General case => cuDNN. 
+ name = 'aten::cudnn_convolution_transpose_backward_weight' if transpose else 'aten::cudnn_convolution_backward_weight' + flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32] + return torch._C._jit_get_operation(name)(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags) + + @staticmethod + def backward(ctx, grad2_grad_weight): + grad_output, input = ctx.saved_tensors + grad_output_shape = ctx.grad_output_shape + input_shape = ctx.input_shape + grad2_grad_output = None + grad2_input = None + + if ctx.needs_input_grad[0]: + grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) + assert grad2_grad_output.shape == grad_output_shape + + if ctx.needs_input_grad[1]: + p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape) + op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) + grad2_input = op.apply(grad_output, grad2_grad_weight, None) + assert grad2_input.shape == input_shape + + return grad2_grad_output, grad2_input + + _conv2d_gradfix_cache[key] = Conv2d + return Conv2d + +#---------------------------------------------------------------------------- diff --git a/torch_utils/ops/conv2d_resample.py b/torch_utils/ops/conv2d_resample.py new file mode 100644 index 0000000000000000000000000000000000000000..5eb5877d7ffe4af74a2165f1d8d8c39dfac2476b --- /dev/null +++ b/torch_utils/ops/conv2d_resample.py @@ -0,0 +1,143 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""2D convolution with optional up/downsampling.""" + +import torch + +from .. import misc +from . import conv2d_gradfix +from . import upfirdn2d +from .upfirdn2d import _parse_padding +from .upfirdn2d import _get_filter_size + +#---------------------------------------------------------------------------- + +def _get_weight_shape(w): + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + shape = [int(sz) for sz in w.shape] + misc.assert_shape(w, shape) + return shape + +#---------------------------------------------------------------------------- + +def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True): + """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations. + """ + _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w) + + # Flip weight if requested. + # Note: conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False). + if not flip_weight and (kw > 1 or kh > 1): + w = w.flip([2, 3]) + + # Execute using conv2d_gradfix. + op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d + return op(x, w, stride=stride, padding=padding, groups=groups) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False): + r"""2D convolution with optional up/downsampling. + + Padding is performed only once at the beginning, not between the operations. 
+ + Args: + x: Input tensor of shape + `[batch_size, in_channels, in_height, in_width]`. + w: Weight tensor of shape + `[out_channels, in_channels//groups, kernel_height, kernel_width]`. + f: Low-pass filter for up/downsampling. Must be prepared beforehand by + calling upfirdn2d.setup_filter(). None = identity (default). + up: Integer upsampling factor (default: 1). + down: Integer downsampling factor (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + groups: Split input channels into N groups (default: 1). + flip_weight: False = convolution, True = correlation (default: True). + flip_filter: False = convolution, True = correlation (default: False). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + # Validate arguments. + assert isinstance(x, torch.Tensor) and (x.ndim == 4) + assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype) + assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32) + assert isinstance(up, int) and (up >= 1) + assert isinstance(down, int) and (down >= 1) + assert isinstance(groups, int) and (groups >= 1) + out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w) + fw, fh = _get_filter_size(f) + px0, px1, py0, py1 = _parse_padding(padding) + + # Adjust padding to account for up/downsampling. + if up > 1: + px0 += (fw + up - 1) // 2 + px1 += (fw - up) // 2 + py0 += (fh + up - 1) // 2 + py1 += (fh - up) // 2 + if down > 1: + px0 += (fw - down + 1) // 2 + px1 += (fw - down) // 2 + py0 += (fh - down + 1) // 2 + py1 += (fh - down) // 2 + + # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve. + if kw == 1 and kh == 1 and (down > 1 and up == 1): + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + return x + + # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample. + if kw == 1 and kh == 1 and (up > 1 and down == 1): + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + return x + + # Fast path: downsampling only => use strided convolution. + if down > 1 and up == 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight) + return x + + # Fast path: upsampling with optional downsampling => use transpose strided convolution. 
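To make the padding adjustment above concrete, a small worked example assuming a 4-tap low-pass filter, up=2, down=1, and no explicit padding (the transpose-convolution fast path follows below):

fw, up, px0, px1 = 4, 2, 0, 0
px0 += (fw + up - 1) // 2    # -> 2
px1 += (fw - up) // 2        # -> 1
# For an input of width W, upfirdn2d then produces W*up + px0 + px1 - (fw - 1)
# = 2*W samples, i.e. the output keeps exactly `up` times the input resolution.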
+ if up > 1: + if groups == 1: + w = w.transpose(0, 1) + else: + w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw) + w = w.transpose(1, 2) + w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw) + px0 -= kw - 1 + px1 -= kw - up + py0 -= kh - 1 + py1 -= kh - up + pxt = max(min(-px0, -px1), 0) + pyt = max(min(-py0, -py1), 0) + x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight)) + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter) + if down > 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + + # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d. + if up == 1 and down == 1: + if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0: + return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight) + + # Fallback: Generic reference implementation. + x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + if down > 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + +#---------------------------------------------------------------------------- diff --git a/torch_utils/ops/filtered_lrelu.cpp b/torch_utils/ops/filtered_lrelu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ff4149b8b46b54d2f400ae10e44d19f20503ba1f --- /dev/null +++ b/torch_utils/ops/filtered_lrelu.cpp @@ -0,0 +1,300 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include +#include +#include +#include "filtered_lrelu.h" + +//------------------------------------------------------------------------ + +static std::tuple filtered_lrelu( + torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, + int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) +{ + // Set CUDA device. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + + // Validate arguments. 
+ TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); + TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); + TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); + TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); + TORCH_CHECK(x.numel() > 0, "x is empty"); + TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); + TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); + TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); + TORCH_CHECK(fu.numel() > 0, "fu is empty"); + TORCH_CHECK(fd.numel() > 0, "fd is empty"); + TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); + TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); + + // Figure out how much shared memory is available on the device. + int maxSharedBytes = 0; + AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); + int sharedKB = maxSharedBytes >> 10; + + // Populate enough launch parameters to check if a CUDA kernel exists. + filtered_lrelu_kernel_params p; + p.up = up; + p.down = down; + p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. + p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); + filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); + if (!test_spec.exec) + { + // No kernel found - return empty tensors and indicate missing kernel with return code of -1. + return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); + } + + // Input/output element size. + int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; + + // Input sizes. + int64_t xw = (int)x.size(3); + int64_t xh = (int)x.size(2); + int64_t fut_w = (int)fu.size(-1) - 1; + int64_t fut_h = (int)fu.size(0) - 1; + int64_t fdt_w = (int)fd.size(-1) - 1; + int64_t fdt_h = (int)fd.size(0) - 1; + + // Logical size of upsampled buffer. + int64_t cw = xw * up + (px0 + px1) - fut_w; + int64_t ch = xh * up + (py0 + py1) - fut_h; + TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); + TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); + + // Compute output size and allocate. + int64_t yw = (cw - fdt_w + (down - 1)) / down; + int64_t yh = (ch - fdt_h + (down - 1)) / down; + TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); + TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); + torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); + + // Allocate sign tensor. + torch::Tensor so; + torch::Tensor s = si; + bool readSigns = !!s.numel(); + int64_t sw_active = 0; // Active width of sign tensor. + if (writeSigns) + { + sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. + int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. + int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. 
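        // Each sign entry occupies 2 bits (bit 0: input was negative, bit 1: value was clamped),
        // so four horizontally adjacent elements share one byte and the tensor width is sw >> 2
        // bytes. Rounding sw up to a multiple of 16 elements keeps every sign row a whole number
        // of 4-byte words, which the kernels assume when coalescing their byte writes.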
+ TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); + s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); + } + else if (readSigns) + sw_active = s.size(3) << 2; + + // Validate sign tensor if in use. + if (readSigns || writeSigns) + { + TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); + TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); + TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); + TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); + TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); + TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); + } + + // Populate rest of CUDA kernel parameters. + p.x = x.data_ptr(); + p.y = y.data_ptr(); + p.b = b.data_ptr(); + p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; + p.fu = fu.data_ptr(); + p.fd = fd.data_ptr(); + p.pad0 = make_int2(px0, py0); + p.gain = gain; + p.slope = slope; + p.clamp = clamp; + p.flip = (flip_filters) ? 1 : 0; + p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); + p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. + p.sOfs = make_int2(sx, sy); + p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. + + // x, y, b strides are in bytes. + p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); + p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); + p.bStride = sz * b.stride(0); + + // fu, fd strides are in elements. + p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); + p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); + + // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. + bool index64b = false; + if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; + if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; + if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; + if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; + if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; + if (s.numel() > INT_MAX) index64b = true; + + // Choose CUDA kernel. + filtered_lrelu_kernel_spec spec = { 0 }; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] + { + if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. + { + // Choose kernel based on index type, datatype and sign read/write modes. 
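            // Each branch presumably selects a specialization for one combination of scalar type,
            // index width (32- vs. 64-bit) and sign read/write mode. In the Python wrapper the
            // sign-writing variant is typically used on the forward pass when gradients are needed
            // and the sign-reading variant on the backward pass; plain inference uses neither.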
+ if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + } + }); + TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. + + // Launch CUDA kernel. + void* args[] = {&p}; + int bx = spec.numWarps * 32; + int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; + int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; + int gz = p.yShape.z * p.yShape.w; + + // Repeat multiple horizontal tiles in a CTA? + if (spec.xrep) + { + p.tilesXrep = spec.xrep; + p.tilesXdim = gx; + + gx = (gx + p.tilesXrep - 1) / p.tilesXrep; + std::swap(gx, gy); + } + else + { + p.tilesXrep = 0; + p.tilesXdim = 0; + } + + // Launch filter setup kernel. + AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); + + // Copy kernels to constant memory. + if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + + // Set cache and shared memory configurations for main kernel. + AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); + if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? + AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); + AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); + + // Launch main kernel. + const int maxSubGz = 65535; // CUDA maximum for block z dimension. + for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. + { + p.blockZofs = zofs; + int subGz = std::min(maxSubGz, gz - zofs); + AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); + } + + // Done. + return std::make_tuple(y, so, 0); +} + +//------------------------------------------------------------------------ + +static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) +{ + // Set CUDA device. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + + // Validate arguments. + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); + TORCH_CHECK(x.numel() > 0, "x is empty"); + TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); + + // Output signs if we don't have sign input. 
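    // (filtered_lrelu_act_ is the slow-path helper: it applies gain, leaky ReLU and clamping to an
    // already-resampled tensor in place, and reads or writes the same 2-bits-per-element sign
    // tensor as the fused kernel so the generic fallback still supports sign-based gradients.)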
+ torch::Tensor so; + torch::Tensor s = si; + bool readSigns = !!s.numel(); + if (writeSigns) + { + int64_t sw = x.size(3); + sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. + s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); + } + + // Validate sign tensor if in use. + if (readSigns || writeSigns) + { + TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); + TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); + TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); + TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); + TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); + TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); + } + + // Initialize CUDA kernel parameters. + filtered_lrelu_act_kernel_params p; + p.x = x.data_ptr(); + p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; + p.gain = gain; + p.slope = slope; + p.clamp = clamp; + p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); + p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. + p.sOfs = make_int2(sx, sy); + + // Choose CUDA kernel. + void* func = 0; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] + { + if (writeSigns) + func = choose_filtered_lrelu_act_kernel(); + else if (readSigns) + func = choose_filtered_lrelu_act_kernel(); + else + func = choose_filtered_lrelu_act_kernel(); + }); + TORCH_CHECK(func, "internal error - CUDA kernel not found"); + + // Launch CUDA kernel. + void* args[] = {&p}; + int bx = 128; // 4 warps per block. + + // Logical size of launch = writeSigns ? p.s : p.x + uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; + uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; + uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. + gx = (gx - 1) / bx + 1; + + // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. + const uint32_t gmax = 65535; + gy = std::min(gy, gmax); + gz = std::min(gz, gmax); + + // Launch. + AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); + return so; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. + m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. +} + +//------------------------------------------------------------------------ diff --git a/torch_utils/ops/filtered_lrelu.cu b/torch_utils/ops/filtered_lrelu.cu new file mode 100644 index 0000000000000000000000000000000000000000..8e6f47f873d42f7181a0faf64779377e70be3012 --- /dev/null +++ b/torch_utils/ops/filtered_lrelu.cu @@ -0,0 +1,1284 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. 
Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include +#include "filtered_lrelu.h" +#include + +//------------------------------------------------------------------------ +// Helpers. + +enum // Filter modes. +{ + MODE_SUSD = 0, // Separable upsampling, separable downsampling. + MODE_FUSD = 1, // Full upsampling, separable downsampling. + MODE_SUFD = 2, // Separable upsampling, full downsampling. + MODE_FUFD = 3, // Full upsampling, full downsampling. +}; + +template struct InternalType; +template <> struct InternalType +{ + typedef double scalar_t; typedef double2 vec2_t; typedef double4 vec4_t; + __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_double2(0, 0); } + __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_double4(0, 0, 0, 0); } + __device__ __forceinline__ static double clamp(double x, double c) { return fmin(fmax(x, -c), c); } +}; +template <> struct InternalType +{ + typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; + __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } + __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } + __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } +}; +template <> struct InternalType +{ + typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; + __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } + __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } + __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } +}; + +#define MIN(A, B) ((A) < (B) ? (A) : (B)) +#define MAX(A, B) ((A) > (B) ? (A) : (B)) +#define CEIL_DIV(A, B) (((B)==1) ? (A) : \ + ((B)==2) ? ((int)((A)+1) >> 1) : \ + ((B)==4) ? ((int)((A)+3) >> 2) : \ + (((A) + ((A) > 0 ? (B) - 1 : 0)) / (B))) + +// This works only up to blocks of size 256 x 256 and for all N that are powers of two. +template __device__ __forceinline__ void fast_div_mod(int& x, int& y, unsigned int i) +{ + if ((N & (N-1)) && N <= 256) + y = (i * ((1<<24)/N + 1)) >> 24; // Assumes N <= 256, i < N*256. + else + y = i/N; + + x = i - y*N; +} + +// Type cast stride before reading it. +template __device__ __forceinline__ T get_stride(const int64_t& x) +{ + return *reinterpret_cast(&x); +} + +//------------------------------------------------------------------------ +// Filters, setup kernel, copying function. + +#define MAX_FILTER_SIZE 32 + +// Combined up/down filter buffers so that transfer can be done with one copy. +__device__ float g_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in global memory, written by setup kernel. +__device__ __constant__ float c_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in constant memory, read by main kernel. + +// Accessors to combined buffers to index up/down filters individually. +#define c_fu (c_fbuf) +#define c_fd (c_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) +#define g_fu (g_fbuf) +#define g_fd (g_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) + +// Set up filters into global memory buffer. 
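// The filters reach the main kernel in two hops: setup_filters_kernel() below zero-pads, optionally
// flips, and writes both filters into the global buffer g_fbuf, and copy_filters() then transfers
// that buffer into the __constant__ array c_fbuf so the main kernel reads its taps through the
// constant cache. A separable filter is stored as a single row (fuShape.y == 0 / fdShape.y == 0).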
+static __global__ void setup_filters_kernel(filtered_lrelu_kernel_params p) +{ + for (int idx = threadIdx.x; idx < MAX_FILTER_SIZE * MAX_FILTER_SIZE; idx += blockDim.x) + { + int x, y; + fast_div_mod(x, y, idx); + + int fu_x = p.flip ? x : (p.fuShape.x - 1 - x); + int fu_y = p.flip ? y : (p.fuShape.y - 1 - y); + if (p.fuShape.y > 0) + g_fu[idx] = (x >= p.fuShape.x || y >= p.fuShape.y) ? 0.0f : p.fu[fu_x * p.fuStride.x + fu_y * p.fuStride.y]; + else + g_fu[idx] = (x >= p.fuShape.x || y > 0) ? 0.0f : p.fu[fu_x * p.fuStride.x]; + + int fd_x = p.flip ? x : (p.fdShape.x - 1 - x); + int fd_y = p.flip ? y : (p.fdShape.y - 1 - y); + if (p.fdShape.y > 0) + g_fd[idx] = (x >= p.fdShape.x || y >= p.fdShape.y) ? 0.0f : p.fd[fd_x * p.fdStride.x + fd_y * p.fdStride.y]; + else + g_fd[idx] = (x >= p.fdShape.x || y > 0) ? 0.0f : p.fd[fd_x * p.fdStride.x]; + } +} + +// Host function to copy filters written by setup kernel into constant buffer for main kernel. +template static cudaError_t copy_filters(cudaStream_t stream) +{ + void* src = 0; + cudaError_t err = cudaGetSymbolAddress(&src, g_fbuf); + if (err) return err; + return cudaMemcpyToSymbolAsync(c_fbuf, src, 2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE * sizeof(float), 0, cudaMemcpyDeviceToDevice, stream); +} + +//------------------------------------------------------------------------ +// Coordinate spaces: +// - Relative to input tensor: inX, inY, tileInX, tileInY +// - Relative to input tile: relInX, relInY, tileInW, tileInH +// - Relative to upsampled tile: relUpX, relUpY, tileUpW, tileUpH +// - Relative to output tile: relOutX, relOutY, tileOutW, tileOutH +// - Relative to output tensor: outX, outY, tileOutX, tileOutY +// +// Relationships between coordinate spaces: +// - inX = tileInX + relInX +// - inY = tileInY + relInY +// - relUpX = relInX * up + phaseInX +// - relUpY = relInY * up + phaseInY +// - relUpX = relOutX * down +// - relUpY = relOutY * down +// - outX = tileOutX + relOutX +// - outY = tileOutY + relOutY + +extern __shared__ char s_buf_raw[]; // When sharedKB <= 48, allocate shared memory statically inside the kernel, otherwise use the externally allocated shared memory buffer. + +template +static __global__ void filtered_lrelu_kernel(filtered_lrelu_kernel_params p) +{ + // Check that we don't try to support non-existing filter modes. 
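    // (The kernel is specialized at compile time on scalar type, index type, shared-memory budget,
    // sign read/write flags, filter mode, up/down factors, filter sizes, output tile size, threads
    // per block and the xrep/write-skip options; the asserts below reject combinations that the
    // tiling scheme cannot handle.)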
+ static_assert(up == 1 || up == 2 || up == 4, "only up=1, up=2, up=4 scales supported"); + static_assert(down == 1 || down == 2 || down == 4, "only down=1, down=2, down=4 scales supported"); + static_assert(fuSize >= up, "upsampling filter size must be at least upsampling factor"); + static_assert(fdSize >= down, "downsampling filter size must be at least downsampling factor"); + static_assert(fuSize % up == 0, "upsampling filter size must be divisible with upsampling factor"); + static_assert(fdSize % down == 0, "downsampling filter size must be divisible with downsampling factor"); + static_assert(fuSize <= MAX_FILTER_SIZE && fdSize <= MAX_FILTER_SIZE, "filter size greater than MAX_FILTER_SIZE"); + static_assert(up != 1 || (fuSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "up=1 supported only for 1x1 full filters"); + static_assert(down != 1 || (fdSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "down=1 supported only for 1x1 full filters"); + static_assert(!(up == 4 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "full filters not supported for up=4"); + static_assert(!(down == 4 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "full filters not supported for down=4"); + + // Static definitions. + typedef typename InternalType::scalar_t scalar_t; + typedef typename InternalType::vec2_t vec2_t; + typedef typename InternalType::vec4_t vec4_t; + const int tileUpW = (tileOutW * down + (fdSize - 1) - (down - 1) + 3) & ~3; // Upsampled tile width, rounded up to multiple of 4. + const int tileUpH = tileOutH * down + (fdSize - 1) - (down - 1); // Upsampled tile height. + const int tileInW = CEIL_DIV(tileUpW + (fuSize - 1), up); // Input tile width. + const int tileInH = CEIL_DIV(tileUpH + (fuSize - 1), up); // Input tile height. + const int tileUpH_up = CEIL_DIV(tileUpH, up) * up; // Upsampled tile height rounded up to a multiple of up. + const int tileInH_up = CEIL_DIV(tileUpH_up + (fuSize - 1), up); // For allocations only, to avoid shared memory read overruns with up=2 and up=4. + + // Merge 1x1 downsampling into last upsampling step for upf1 and ups2. + const bool downInline = (down == 1) && ((up == 1 && filterMode == MODE_FUFD) || (up == 2 && filterMode == MODE_SUFD)); + + // Sizes of logical buffers. + const int szIn = tileInH_up * tileInW; + const int szUpX = tileInH_up * tileUpW; + const int szUpXY = downInline ? 0 : (tileUpH * tileUpW); + const int szDownX = tileUpH * tileOutW; + + // Sizes for shared memory arrays. + const int s_buf0_size_base = + (filterMode == MODE_SUSD) ? MAX(szIn, szUpXY) : + (filterMode == MODE_FUSD) ? MAX(szIn, szDownX) : + (filterMode == MODE_SUFD) ? MAX(szIn, szUpXY) : + (filterMode == MODE_FUFD) ? szIn : + -1; + const int s_buf1_size_base = + (filterMode == MODE_SUSD) ? MAX(szUpX, szDownX) : + (filterMode == MODE_FUSD) ? szUpXY : + (filterMode == MODE_SUFD) ? szUpX : + (filterMode == MODE_FUFD) ? szUpXY : + -1; + + // Ensure U128 alignment. + const int s_buf0_size = (s_buf0_size_base + 3) & ~3; + const int s_buf1_size = (s_buf1_size_base + 3) & ~3; + + // Check at compile time that we don't use too much shared memory. + static_assert((s_buf0_size + s_buf1_size) * sizeof(scalar_t) <= (sharedKB << 10), "shared memory overflow"); + + // Declare shared memory arrays. + scalar_t* s_buf0; + scalar_t* s_buf1; + if (sharedKB <= 48) + { + // Allocate shared memory arrays here. + __shared__ scalar_t s_buf0_st[(sharedKB > 48) ? 
(1<<24) : (s_buf0_size + s_buf1_size)]; // Prevent launching if this isn't optimized away when unused. + s_buf0 = s_buf0_st; + s_buf1 = s_buf0 + s_buf0_size; + } + else + { + // Use the dynamically allocated shared memory array. + s_buf0 = (scalar_t*)s_buf_raw; + s_buf1 = s_buf0 + s_buf0_size; + } + + // Pointers to the buffers. + scalar_t* s_tileIn; // Input tile: [relInX * tileInH + relInY] + scalar_t* s_tileUpX; // After horizontal upsampling: [relInY * tileUpW + relUpX] + scalar_t* s_tileUpXY; // After upsampling: [relUpY * tileUpW + relUpX] + scalar_t* s_tileDownX; // After horizontal downsampling: [relUpY * tileOutW + relOutX] + if (filterMode == MODE_SUSD) + { + s_tileIn = s_buf0; + s_tileUpX = s_buf1; + s_tileUpXY = s_buf0; + s_tileDownX = s_buf1; + } + else if (filterMode == MODE_FUSD) + { + s_tileIn = s_buf0; + s_tileUpXY = s_buf1; + s_tileDownX = s_buf0; + } + else if (filterMode == MODE_SUFD) + { + s_tileIn = s_buf0; + s_tileUpX = s_buf1; + s_tileUpXY = s_buf0; + } + else if (filterMode == MODE_FUFD) + { + s_tileIn = s_buf0; + s_tileUpXY = s_buf1; + } + + // Allow large grids in z direction via per-launch offset. + int channelIdx = blockIdx.z + p.blockZofs; + int batchIdx = channelIdx / p.yShape.z; + channelIdx -= batchIdx * p.yShape.z; + + // Offset to output feature map. In bytes. + index_t mapOfsOut = channelIdx * get_stride(p.yStride.z) + batchIdx * get_stride(p.yStride.w); + + // Sign shift amount. + uint32_t signXo = ((threadIdx.x + p.sOfs.x) << 1) & 6; + + // Inner tile loop. + #pragma unroll 1 + for (int tileIdx = 0; !enableXrep || (tileIdx < MIN(p.tilesXrep, p.tilesXdim - p.tilesXrep * blockIdx.y)); tileIdx++) + { + // Locate output tile. + int tileX = enableXrep ? blockIdx.y * p.tilesXrep + tileIdx : blockIdx.x; + int tileOutX = tileX * tileOutW; + int tileOutY = (enableXrep ? blockIdx.x : blockIdx.y) * tileOutH; + + // Locate input tile. + int tmpX = tileOutX * down - p.pad0.x; + int tmpY = tileOutY * down - p.pad0.y; + int tileInX = CEIL_DIV(tmpX, up); + int tileInY = CEIL_DIV(tmpY, up); + const int phaseInX = tileInX * up - tmpX; + const int phaseInY = tileInY * up - tmpY; + + // Extra sync if input and output buffers are the same and we are not on first tile. + if (enableXrep && tileIdx > 0 && (filterMode == MODE_FUSD || (filterMode == MODE_SUFD && !downInline) || (filterMode == MODE_FUFD && downInline))) + __syncthreads(); + + // Load input tile & apply bias. Unrolled. + scalar_t b = (scalar_t)*(const T*)((const char*)p.b + (channelIdx * get_stride(p.bStride))); + index_t mapOfsIn = channelIdx * get_stride(p.xStride.z) + batchIdx * get_stride(p.xStride.w); + int idx = threadIdx.x; + const int loopCountIN = CEIL_DIV(tileInW * tileInH, threadsPerBlock); + #pragma unroll + for (int loop = 0; loop < loopCountIN; loop++) + { + int relInX, relInY; + fast_div_mod(relInX, relInY, idx); + int inX = tileInX + relInX; + int inY = tileInY + relInY; + scalar_t v = 0; + + if ((uint32_t)inX < p.xShape.x && (uint32_t)inY < p.xShape.y) + v = (scalar_t)*((const T*)((const char*)p.x + (inX * get_stride(p.xStride.x) + inY * get_stride(p.xStride.y) + mapOfsIn))) + b; + + bool skip = (loop == loopCountIN-1) && (idx >= tileInW * tileInH); + if (!skip) + s_tileIn[idx] = v; + + idx += threadsPerBlock; + } + + if (filterMode == MODE_SUSD || filterMode == MODE_SUFD) // Separable upsampling filter. + { + // Horizontal upsampling. 
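        // Separable upsampling runs as two 1-D passes: this pass filters along x and writes the
        // widened rows into s_tileUpX; the next pass filters those rows along y and applies the
        // gain, leaky ReLU and clamping. The filter is applied in polyphase form: output phase q
        // within a factor-`up` group only touches taps c_fu[step * up + q], so each thread
        // accumulates `up` adjacent outputs (vec2/vec4) from one strided walk over the input row.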
+ __syncthreads(); + if (up == 4) + { + for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) + { + int relUpX0, relInY; + fast_div_mod(relUpX0, relInY, idx); + int relInX0 = relUpX0 / up; + int src0 = relInX0 + tileInW * relInY; + int dst = relInY * tileUpW + relUpX0; + vec4_t v = InternalType::zero_vec4(); + scalar_t a = s_tileIn[src0]; + if (phaseInX == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.y += a * (scalar_t)c_fu[step * up + 3]; + v.z += a * (scalar_t)c_fu[step * up + 2]; + v.w += a * (scalar_t)c_fu[step * up + 1]; + } + } + else if (phaseInX == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.z += a * (scalar_t)c_fu[step * up + 3]; + v.w += a * (scalar_t)c_fu[step * up + 2]; + } + } + else if (phaseInX == 2) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 2]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + v.z += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.w += a * (scalar_t)c_fu[step * up + 3]; + } + } + else // (phaseInX == 3) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 3]; + v.y += a * (scalar_t)c_fu[step * up + 2]; + v.z += a * (scalar_t)c_fu[step * up + 1]; + v.w += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + } + } + s_tileUpX[dst+0] = v.x; + s_tileUpX[dst+1] = v.y; + s_tileUpX[dst+2] = v.z; + s_tileUpX[dst+3] = v.w; + } + } + else if (up == 2) + { + bool p0 = (phaseInX == 0); + for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) + { + int relUpX0, relInY; + fast_div_mod(relUpX0, relInY, idx); + int relInX0 = relUpX0 / up; + int src0 = relInX0 + tileInW * relInY; + int dst = relInY * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + scalar_t a = s_tileIn[src0]; + if (p0) // (phaseInX == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + } + } + else // (phaseInX == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + } + } + s_tileUpX[dst+0] = v.x; + s_tileUpX[dst+1] = v.y; + } + } + + // Vertical upsampling & nonlinearity. + + __syncthreads(); + int groupMask = 15 << ((threadIdx.x & 31) & ~3); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. + int sShapeMaxY = MIN(p.sShape.y, tileOutY * down + tileUpH); // Avoid out-of-tile sign writes. + if (up == 4) + { + minY -= 3; // Adjust according to block height. 
+ for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) + { + int relUpX, relInY0; + fast_div_mod(relUpX, relInY0, idx); + int relUpY0 = relInY0 * up; + int src0 = relInY0 * tileUpW + relUpX; + int dst = relUpY0 * tileUpW + relUpX; + vec4_t v = InternalType::zero_vec4(); + + scalar_t a = s_tileUpX[src0]; + if (phaseInY == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.y += a * (scalar_t)c_fu[step * up + 3]; + v.z += a * (scalar_t)c_fu[step * up + 2]; + v.w += a * (scalar_t)c_fu[step * up + 1]; + } + } + else if (phaseInY == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.z += a * (scalar_t)c_fu[step * up + 3]; + v.w += a * (scalar_t)c_fu[step * up + 2]; + } + } + else if (phaseInY == 2) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 2]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + v.z += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.w += a * (scalar_t)c_fu[step * up + 3]; + } + } + else // (phaseInY == 3) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 3]; + v.y += a * (scalar_t)c_fu[step * up + 2]; + v.z += a * (scalar_t)c_fu[step * up + 1]; + v.w += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + } + } + + int x = tileOutX * down + relUpX; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + index_t si1 = si0 + p.sShape.x; + index_t si2 = si0 + p.sShape.x * 2; + index_t si3 = si0 + p.sShape.x * 3; + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + v.z *= (scalar_t)((float)up * (float)up * p.gain); + v.w *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + int sz = __float_as_uint(v.z) >> 31 << 16; + int sw = __float_as_uint(v.w) >> 31 << 24; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (sz) v.z *= p.slope; + if (sw) v.w *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType::clamp(v.z, p.clamp); } + if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType::clamp(v.w, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + // Combine signs. + uint32_t s = sx + sy + sw + sz; + s <<= (signX & 3) << 1; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. 
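                        // The __shfl_xor_sync calls above have already OR-ed the 2-bit codes of the
                        // four lanes covering this byte-aligned group of x positions, so every lane
                        // holds the same packed bytes and the bounds-checked stores below emit one
                        // byte per sign row.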
+ if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } + if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + int sz = __float_as_uint(v.z) >> 31 << 16; + int sw = __float_as_uint(v.w) >> 31 << 24; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (sz) v.z *= p.slope; + if (sw) v.w *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType::clamp(v.z, p.clamp); } + if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType::clamp(v.w, p.clamp); } + + // Combine signs. + uint32_t s = sx + sy + sw + sz; + s <<= (signX & 3) << 1; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } + if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + } + } + else if (signRead) // Read signs and apply. + { + if ((uint32_t)signXb < p.swLimit) + { + int ss = (signX & 3) << 1; + if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> ss; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } + if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> ss; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } + if ((uint32_t)(signY + 2) < p.sShape.y) { int s = p.s[si2] >> ss; if (s & 1) v.z *= p.slope; if (s & 2) v.z = 0.f; } + if ((uint32_t)(signY + 3) < p.sShape.y) { int s = p.s[si3] >> ss; if (s & 1) v.w *= p.slope; if (s & 2) v.w = 0.f; } + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + + s_tileUpXY[dst + 0 * tileUpW] = v.x; + if (relUpY0 + 1 < tileUpH) s_tileUpXY[dst + 1 * tileUpW] = v.y; + if (relUpY0 + 2 < tileUpH) s_tileUpXY[dst + 2 * tileUpW] = v.z; + if (relUpY0 + 3 < tileUpH) s_tileUpXY[dst + 3 * tileUpW] = v.w; + } + } + else if (up == 2) + { + minY -= 1; // Adjust according to block height. 
+ for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) + { + int relUpX, relInY0; + fast_div_mod(relUpX, relInY0, idx); + int relUpY0 = relInY0 * up; + int src0 = relInY0 * tileUpW + relUpX; + int dst = relUpY0 * tileUpW + relUpX; + vec2_t v = InternalType::zero_vec2(); + + scalar_t a = s_tileUpX[src0]; + if (phaseInY == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + } + } + else // (phaseInY == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + } + } + + int x = tileOutX * down + relUpX; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + index_t si1 = si0 + p.sShape.x; + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + // Combine signs. + int s = sx + sy; + s <<= signXo; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + + // Combine signs. + int s = sx + sy; + s <<= signXo; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + } + } + } + else if (signRead) // Read signs and apply. + { + if ((uint32_t)signXb < p.swLimit) + { + if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> signXo; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } + if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> signXo; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + } + + if (!downInline) + { + // Write into temporary buffer. 
+ s_tileUpXY[dst] = v.x; + if (relUpY0 < tileUpH - 1) + s_tileUpXY[dst + tileUpW] = v.y; + } + else + { + // Write directly into output buffer. + if ((uint32_t)x < p.yShape.x) + { + int ymax = MIN(p.yShape.y, tileUpH + tileOutY * down); + index_t ofs = x * get_stride(p.yStride.x) + y * get_stride(p.yStride.y) + mapOfsOut; + if ((uint32_t)y + 0 < p.yShape.y) *((T*)((char*)p.y + ofs)) = (T)(v.x * (scalar_t)c_fd[0]); + if ((uint32_t)y + 1 < ymax) *((T*)((char*)p.y + ofs + get_stride(p.yStride.y))) = (T)(v.y * (scalar_t)c_fd[0]); + } + } + } + } + } + else if (filterMode == MODE_FUSD || filterMode == MODE_FUFD) + { + // Full upsampling filter. + + if (up == 2) + { + // 2 x 2-wide. + __syncthreads(); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH + p.sOfs.y : 0; // Skip already written signs. + for (int idx = threadIdx.x * 4; idx < tileUpW * tileUpH; idx += blockDim.x * 4) + { + int relUpX0, relUpY0; + fast_div_mod(relUpX0, relUpY0, idx); + int relInX0 = CEIL_DIV(relUpX0 - phaseInX, up); + int relInY0 = CEIL_DIV(relUpY0 - phaseInY, up); + int src0 = relInX0 + tileInW * relInY0; + int tap0y = (relInY0 * up + phaseInY - relUpY0); + + #define X_LOOP(TAPY, PX) \ + for (int sx = 0; sx < fuSize / up; sx++) \ + { \ + v.x += a * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ + v.z += b * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 0) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ + v.y += a * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ + v.w += b * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 1) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ + } + + vec4_t v = InternalType::zero_vec4(); + if (tap0y == 0 && phaseInX == 0) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(0, 0) } + if (tap0y == 0 && phaseInX == 1) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(0, 1) } + if (tap0y == 1 && phaseInX == 0) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(1, 0) } + if (tap0y == 1 && phaseInX == 1) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(1, 1) } + + #undef X_LOOP + + int x = tileOutX * down + relUpX0; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + v.z *= (scalar_t)((float)up * (float)up * p.gain); + v.w *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. 
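                            // In this full-filter path each thread produces four horizontally
                            // adjacent upsampled outputs, so their four 2-bit codes fill exactly
                            // one sign byte (sx | sy<<2 | sz<<4 | sw<<6) and no warp shuffle is
                            // needed before the store.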
+ int sx = __float_as_uint(v.x) >> 31; + int sy = __float_as_uint(v.y) >> 31; + int sz = __float_as_uint(v.z) >> 31; + int sw = __float_as_uint(v.w) >> 31; + if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType::clamp(v.x, p.clamp); } + if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType::clamp(v.y, p.clamp); } + if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType::clamp(v.z, p.clamp); } + if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType::clamp(v.w, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31; + int sy = __float_as_uint(v.y) >> 31; + int sz = __float_as_uint(v.z) >> 31; + int sw = __float_as_uint(v.w) >> 31; + if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType::clamp(v.x, p.clamp); } + if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType::clamp(v.y, p.clamp); } + if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType::clamp(v.z, p.clamp); } + if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType::clamp(v.w, p.clamp); } + + p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + } + } + else if (signRead) // Read sign and apply. + { + if ((uint32_t)signY < p.sShape.y) + { + int s = 0; + if ((uint32_t)signXb < p.swLimit) s = p.s[si]; + if ((uint32_t)signXb + 1 < p.swLimit) s |= p.s[si + 1] << 8; + s >>= (signX & 3) << 1; + if (s & 0x01) v.x *= p.slope; if (s & 0x02) v.x = 0.f; + if (s & 0x04) v.y *= p.slope; if (s & 0x08) v.y = 0.f; + if (s & 0x10) v.z *= p.slope; if (s & 0x20) v.z = 0.f; + if (s & 0x40) v.w *= p.slope; if (s & 0x80) v.w = 0.f; + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + + s_tileUpXY[idx + 0] = v.x; + s_tileUpXY[idx + 1] = v.y; + s_tileUpXY[idx + 2] = v.z; + s_tileUpXY[idx + 3] = v.w; + } + } + else if (up == 1) + { + __syncthreads(); + uint32_t groupMask = 15 << ((threadIdx.x & 31) & ~3); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. + for (int idx = threadIdx.x; idx < tileUpW * tileUpH; idx += blockDim.x) + { + int relUpX0, relUpY0; + fast_div_mod(relUpX0, relUpY0, idx); + scalar_t v = s_tileIn[idx] * (scalar_t)c_fu[0]; // 1x1 filter. 
+ + int x = tileOutX * down + relUpX0; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + v *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write sign. + uint32_t s = 0; + uint32_t signXbit = (1u << signXo); + if (v < 0.f) + { + s = signXbit; + v *= p.slope; + } + if (fabsf(v) > p.clamp) + { + s = signXbit * 2; + v = InternalType::clamp(v, p.clamp); + } + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. + s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. + p.s[si] = s; // Write. + } + } + else + { + // Determine and write sign. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + uint32_t s = 0; + uint32_t signXbit = (1u << signXo); + if (v < 0.f) + { + s = signXbit; + v *= p.slope; + } + if (fabsf(v) > p.clamp) + { + s = signXbit * 2; + v = InternalType::clamp(v, p.clamp); + } + s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. + s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. + p.s[si] = s; // Write. + } + else + { + // Just compute the value. + if (v < 0.f) v *= p.slope; + v = InternalType::clamp(v, p.clamp); + } + } + } + else if (signRead) + { + // Read sign and apply if within sign tensor bounds. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y) + { + int s = p.s[si]; + s >>= signXo; + if (s & 1) v *= p.slope; + if (s & 2) v = 0.f; + } + } + else // Forward pass with no sign write. + { + if (v < 0.f) v *= p.slope; + v = InternalType::clamp(v, p.clamp); + } + + if (!downInline) // Write into temporary buffer. + s_tileUpXY[idx] = v; + else if ((uint32_t)x < p.yShape.x && (uint32_t)y < p.yShape.y) // Write directly into output buffer + *((T*)((char*)p.y + (x * get_stride(p.yStride.x) + y * get_stride(p.yStride.y) + mapOfsOut))) = (T)(v * (scalar_t)c_fd[0]); + } + } + } + + // Downsampling. + if (filterMode == MODE_SUSD || filterMode == MODE_FUSD) + { + // Horizontal downsampling. + __syncthreads(); + if (down == 4 && tileOutW % 4 == 0) + { + // Calculate 4 pixels at a time. + for (int idx = threadIdx.x * 4; idx < tileOutW * tileUpH; idx += blockDim.x * 4) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src0 = relUpY * tileUpW + relUpX0; + vec4_t v = InternalType::zero_vec4(); + #pragma unroll + for (int step = 0; step < fdSize; step++) + { + v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; + v.y += s_tileUpXY[src0 + 4 + step] * (scalar_t)c_fd[step]; + v.z += s_tileUpXY[src0 + 8 + step] * (scalar_t)c_fd[step]; + v.w += s_tileUpXY[src0 + 12 + step] * (scalar_t)c_fd[step]; + } + s_tileDownX[idx+0] = v.x; + s_tileDownX[idx+1] = v.y; + s_tileDownX[idx+2] = v.z; + s_tileDownX[idx+3] = v.w; + } + } + else if ((down == 2 || down == 4) && (tileOutW % 2 == 0)) + { + // Calculate 2 pixels at a time. 
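            // Same structure as the 4-wide case above, but with a vec2 accumulator: both outputs
            // share the taps c_fd[step] and differ only in their starting offset (0 vs. `down`)
            // within the upsampled row, so the tap loop stays fully unrolled.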
+ for (int idx = threadIdx.x * 2; idx < tileOutW * tileUpH; idx += blockDim.x * 2) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src0 = relUpY * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + #pragma unroll + for (int step = 0; step < fdSize; step++) + { + v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; + v.y += s_tileUpXY[src0 + down + step] * (scalar_t)c_fd[step]; + } + s_tileDownX[idx+0] = v.x; + s_tileDownX[idx+1] = v.y; + } + } + else + { + // Calculate 1 pixel at a time. + for (int idx = threadIdx.x; idx < tileOutW * tileUpH; idx += blockDim.x) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src = relUpY * tileUpW + relUpX0; + scalar_t v = 0.f; + #pragma unroll + for (int step = 0; step < fdSize; step++) + v += s_tileUpXY[src + step] * (scalar_t)c_fd[step]; + s_tileDownX[idx] = v; + } + } + + // Vertical downsampling & store output tile. + __syncthreads(); + for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) + { + int relOutX, relOutY0; + fast_div_mod(relOutX, relOutY0, idx); + int relUpY0 = relOutY0 * down; + int src0 = relUpY0 * tileOutW + relOutX; + scalar_t v = 0; + #pragma unroll + for (int step = 0; step < fdSize; step++) + v += s_tileDownX[src0 + step * tileOutW] * (scalar_t)c_fd[step]; + + int outX = tileOutX + relOutX; + int outY = tileOutY + relOutY0; + + if (outX < p.yShape.x & outY < p.yShape.y) + *((T*)((char*)p.y + (outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut))) = (T)v; + } + } + else if (filterMode == MODE_SUFD || filterMode == MODE_FUFD) + { + // Full downsampling filter. + if (down == 2) + { + // 2-wide. + __syncthreads(); + for (int idx = threadIdx.x * 2; idx < tileOutW * tileOutH; idx += blockDim.x * 2) + { + int relOutX0, relOutY0; + fast_div_mod(relOutX0, relOutY0, idx); + int relUpX0 = relOutX0 * down; + int relUpY0 = relOutY0 * down; + int src0 = relUpY0 * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + #pragma unroll + for (int sy = 0; sy < fdSize; sy++) + #pragma unroll + for (int sx = 0; sx < fdSize; sx++) + { + v.x += s_tileUpXY[src0 + 0 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; + v.y += s_tileUpXY[src0 + 2 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; + } + + int outX = tileOutX + relOutX0; + int outY = tileOutY + relOutY0; + if ((uint32_t)outY < p.yShape.y) + { + index_t ofs = outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut; + if (outX + 0 < p.yShape.x) *((T*)((char*)p.y + ofs)) = (T)v.x; + if (outX + 1 < p.yShape.x) *((T*)((char*)p.y + ofs + get_stride(p.yStride.x))) = (T)v.y; + } + } + } + else if (down == 1 && !downInline) + { + // Thread per pixel. + __syncthreads(); + for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) + { + int relOutX0, relOutY0; + fast_div_mod(relOutX0, relOutY0, idx); + scalar_t v = s_tileUpXY[idx] * (scalar_t)c_fd[0]; // 1x1 filter. + + int outX = tileOutX + relOutX0; + int outY = tileOutY + relOutY0; + if ((uint32_t)outX < p.yShape.x && (uint32_t)outY < p.yShape.y) + *((T*)((char*)p.y + (outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut))) = (T)v; + } + } + } + + if (!enableXrep) + break; + } +} + +//------------------------------------------------------------------------ +// Compute activation function and signs for upsampled data tensor, modifying data tensor in-place. 
Used for accelerating the generic variant. +// Sign tensor is known to be contiguous, and p.x and p.s have the same z, w dimensions. 64-bit indexing is always used. + +template +static __global__ void filtered_lrelu_act_kernel(filtered_lrelu_act_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + + // Indexing. + int32_t x = threadIdx.x + blockIdx.x * blockDim.x; + int32_t ymax = signWrite ? p.sShape.y : p.xShape.y; + int32_t qmax = p.xShape.z * p.xShape.w; // Combined minibatch*channel maximum index. + + // Loop to accommodate oversized tensors. + for (int32_t q = blockIdx.z; q < qmax; q += gridDim.z) + for (int32_t y = blockIdx.y; y < ymax; y += gridDim.y) + { + // Extract z and w (channel, minibatch index). + int32_t w = q / p.xShape.z; + int32_t z = q - w * p.xShape.z; + + // Choose behavior based on sign read/write mode. + if (signWrite) + { + // Process value if in p.x. + uint32_t s = 0; + if (x < p.xShape.x && y < p.xShape.y) + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + + // Gain, LReLU, clamp. + v *= p.gain; + if (v < 0.f) + { + v *= p.slope; + s = 1; // Sign. + } + if (fabsf(v) > p.clamp) + { + v = InternalType::clamp(v, p.clamp); + s = 2; // Clamp. + } + + *pv = (T)v; // Write value. + } + + // Coalesce into threads 0 and 16 of warp. + uint32_t m = (threadIdx.x & 16) ? 0xffff0000u : 0x0000ffffu; + s <<= ((threadIdx.x & 15) << 1); // Shift into place. + s |= __shfl_xor_sync(m, s, 1); // Distribute. + s |= __shfl_xor_sync(m, s, 2); + s |= __shfl_xor_sync(m, s, 4); + s |= __shfl_xor_sync(m, s, 8); + + // Write signs if leader and in p.s. + if (!(threadIdx.x & 15) && x < p.sShape.x) // y is always in. + { + uint64_t is = x + p.sShape.x * (y + (int64_t)p.sShape.y * q); // Contiguous. + ((uint32_t*)p.s)[is >> 4] = s; + } + } + else if (signRead) + { + // Process value if in p.x. + if (x < p.xShape.x) // y is always in. + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + v *= p.gain; + + // Apply sign buffer offset. + uint32_t sx = x + p.sOfs.x; + uint32_t sy = y + p.sOfs.y; + + // Read and apply signs if we land inside valid region of sign buffer. + if (sx < p.sShape.x && sy < p.sShape.y) + { + uint64_t is = (sx >> 2) + (p.sShape.x >> 2) * (sy + (uint64_t)p.sShape.y * q); // Contiguous. + unsigned char s = p.s[is]; + s >>= (sx & 3) << 1; // Shift into place. + if (s & 1) // Sign? + v *= p.slope; + if (s & 2) // Clamp? + v = 0.f; + } + + *pv = (T)v; // Write value. + } + } + else + { + // Forward pass with no sign write. Process value if in p.x. + if (x < p.xShape.x) // y is always in. + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + v *= p.gain; + if (v < 0.f) + v *= p.slope; + if (fabsf(v) > p.clamp) + v = InternalType::clamp(v, p.clamp); + *pv = (T)v; // Write value. + } + } + } +} + +template void* choose_filtered_lrelu_act_kernel(void) +{ + return (void*)filtered_lrelu_act_kernel; +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB) +{ + filtered_lrelu_kernel_spec s = { 0 }; + + // Return the first matching kernel. 
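    // Each CASE(...) entry encodes one specialization: SH = minimum shared memory in KB, U/FU =
    // upsampling factor and maximum upsampling filter size, D/FD = the same for downsampling,
    // MODE = separable/full filter combination, TW/TH = output tile size, W = warps per block,
    // XR = horizontal tiles per block (xrep) and WS = the write-skip option. The first entry whose
    // requirements are met is returned, which is why smaller filters and larger shared-memory
    // variants must be listed first.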
+#define CASE(SH, U, FU, D, FD, MODE, TW, TH, W, XR, WS) \ + if (sharedKB >= SH) \ + if ((p.fuShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_SUFD)) || (p.fuShape.y > 0 && (MODE == MODE_FUSD || MODE == MODE_FUFD))) \ + if ((p.fdShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_FUSD)) || (p.fdShape.y > 0 && (MODE == MODE_SUFD || MODE == MODE_FUFD))) \ + if (p.up == U && p.fuShape.x <= FU && p.fuShape.y <= FU && p.down == D && p.fdShape.x <= FD && p.fdShape.y <= FD) \ + { \ + static_assert((D*TW % 4) == 0, "down * tileWidth must be divisible by 4"); \ + static_assert(FU % U == 0, "upscaling filter size must be multiple of upscaling factor"); \ + static_assert(FD % D == 0, "downscaling filter size must be multiple of downscaling factor"); \ + s.setup = (void*)setup_filters_kernel; \ + s.exec = (void*)filtered_lrelu_kernel; \ + s.tileOut = make_int2(TW, TH); \ + s.numWarps = W; \ + s.xrep = XR; \ + s.dynamicSharedKB = (SH == 48) ? 0 : SH; \ + return s; \ + } + + // Launch parameters for various kernel specializations. + // Small filters must be listed before large filters, otherwise the kernel for larger filter will always match first. + // Kernels that use more shared memory must be listed before those that use less, for the same reason. + + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/1,1, /*mode*/MODE_FUFD, /*tw,th,warps,xrep,wskip*/64, 178, 32, 0, 0) // 1t-upf1-downf1 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/152, 95, 16, 0, 0) // 4t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 22, 16, 0, 0) // 4t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 29, 16, 11, 0) // 4t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/60, 28, 16, 0, 0) // 4t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 28, 16, 0, 0) // 4t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 31, 16, 11, 0) // 4t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 36, 16, 0, 0) // 4t-ups4-downf2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 22, 16, 12, 0) // 4t-ups2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/29, 15, 16, 0, 0) // 4t-upf2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/96, 150, 28, 0, 0) // 6t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 35, 24, 0, 0) // 6t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 16, 10, 0) // 6t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/58, 28, 24, 8, 0) // 6t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/52, 28, 16, 0, 0) // 6t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 51, 16, 5, 0) // 6t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 56, 16, 6, 0) // 
6t-ups4-downf2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 18, 16, 12, 0) // 6t-ups2-downs4 + CASE(/*sharedKB*/96, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 31, 32, 6, 0) // 6t-upf2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 13, 24, 0, 0) // 6t-upf2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/148, 89, 24, 0, 0) // 8t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 31, 16, 5, 0) // 8t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 41, 16, 9, 0) // 8t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 26, 24, 0, 0) // 8t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 40, 16, 0, 0) // 8t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 24, 5, 0) // 8t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 50, 16, 0, 0) // 8t-ups4-downf2 + CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/24, 24, 32, 12, 1) // 8t-ups2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 13, 16, 10, 1) // 8t-ups2-downs4 + CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 28, 28, 4, 0) // 8t-upf2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 10, 24, 0, 0) // 8t-upf2-downs4 + + #undef CASE + return s; // No kernel found. +} + +//------------------------------------------------------------------------ diff --git a/torch_utils/ops/filtered_lrelu.h b/torch_utils/ops/filtered_lrelu.h new file mode 100644 index 0000000000000000000000000000000000000000..2c403e3f275f472315662321cad54dd0dbc56d00 --- /dev/null +++ b/torch_utils/ops/filtered_lrelu.h @@ -0,0 +1,90 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include + +//------------------------------------------------------------------------ +// CUDA kernel parameters. + +struct filtered_lrelu_kernel_params +{ + // These parameters decide which kernel to use. + int up; // upsampling ratio (1, 2, 4) + int down; // downsampling ratio (1, 2, 4) + int2 fuShape; // [size, 1] | [size, size] + int2 fdShape; // [size, 1] | [size, size] + + int _dummy; // Alignment. + + // Rest of the parameters. + const void* x; // Input tensor. + void* y; // Output tensor. + const void* b; // Bias tensor. + unsigned char* s; // Sign tensor in/out. NULL if unused. + const float* fu; // Upsampling filter. + const float* fd; // Downsampling filter. + + int2 pad0; // Left/top padding. + float gain; // Additional gain factor. 
+ float slope; // Leaky ReLU slope on negative side. + float clamp; // Clamp after nonlinearity. + int flip; // Filter kernel flip for gradient computation. + + int tilesXdim; // Original number of horizontal output tiles. + int tilesXrep; // Number of horizontal tiles per CTA. + int blockZofs; // Block z offset to support large minibatch, channel dimensions. + + int4 xShape; // [width, height, channel, batch] + int4 yShape; // [width, height, channel, batch] + int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused. + int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. + int swLimit; // Active width of sign tensor in bytes. + + longlong4 xStride; // Strides of all tensors except signs, same component order as shapes. + longlong4 yStride; // + int64_t bStride; // + longlong3 fuStride; // + longlong3 fdStride; // +}; + +struct filtered_lrelu_act_kernel_params +{ + void* x; // Input/output, modified in-place. + unsigned char* s; // Sign tensor in/out. NULL if unused. + + float gain; // Additional gain factor. + float slope; // Leaky ReLU slope on negative side. + float clamp; // Clamp after nonlinearity. + + int4 xShape; // [width, height, channel, batch] + longlong4 xStride; // Input/output tensor strides, same order as in shape. + int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused. + int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. +}; + +//------------------------------------------------------------------------ +// CUDA kernel specialization. + +struct filtered_lrelu_kernel_spec +{ + void* setup; // Function for filter kernel setup. + void* exec; // Function for main operation. + int2 tileOut; // Width/height of launch tile. + int numWarps; // Number of warps per thread block, determines launch block size. + int xrep; // For processing multiple horizontal tiles per thread block. + int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants. +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template void* choose_filtered_lrelu_act_kernel(void); +template cudaError_t copy_filters(cudaStream_t stream); + +//------------------------------------------------------------------------ diff --git a/torch_utils/ops/filtered_lrelu.py b/torch_utils/ops/filtered_lrelu.py new file mode 100644 index 0000000000000000000000000000000000000000..6106c917d1cbff4f1cf637390dd6ba0c597a830f --- /dev/null +++ b/torch_utils/ops/filtered_lrelu.py @@ -0,0 +1,274 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +import os +import numpy as np +import torch +import warnings + +from .. import custom_ops +from .. import misc +from . import upfirdn2d +from . 
import bias_act + +#---------------------------------------------------------------------------- + +_plugin = None + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='filtered_lrelu_plugin', + sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'], + headers=['filtered_lrelu.h', 'filtered_lrelu.cu'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], + ) + return True + +def _get_filter_size(f): + if f is None: + return 1, 1 + assert isinstance(f, torch.Tensor) + assert 1 <= f.ndim <= 2 + return f.shape[-1], f.shape[0] # width, height + +def _parse_padding(padding): + if isinstance(padding, int): + padding = [padding, padding] + assert isinstance(padding, (list, tuple)) + assert all(isinstance(x, (int, np.integer)) for x in padding) + padding = [int(x) for x in padding] + if len(padding) == 2: + px, py = padding + padding = [px, px, py, py] + px0, px1, py0, py1 = padding + return px0, px1, py0, py1 + +#---------------------------------------------------------------------------- + +def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'): + r"""Filtered leaky ReLU for a batch of 2D images. + + Performs the following sequence of operations for each channel: + + 1. Add channel-specific bias if provided (`b`). + + 2. Upsample the image by inserting N-1 zeros after each pixel (`up`). + + 3. Pad the image with the specified number of zeros on each side (`padding`). + Negative padding corresponds to cropping the image. + + 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it + so that the footprint of all output pixels lies within the input image. + + 5. Multiply each value by the provided gain factor (`gain`). + + 6. Apply leaky ReLU activation function to each value. + + 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided. + + 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking + it so that the footprint of all output pixels lies within the input image. + + 9. Downsample the image by keeping every Nth pixel (`down`). + + The fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports gradients of arbitrary order. + + Args: + x: Float32/float16/float64 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + fu: Float32 upsampling FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + fd: Float32 downsampling FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type + as `x`. The length of vector must must match the channel dimension of `x`. + up: Integer upsampling factor (default: 1). + down: Integer downsampling factor. (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + gain: Overall scaling factor for signal magnitude (default: sqrt(2)). + slope: Slope on the negative side of leaky ReLU (default: 0.2). + clamp: Maximum magnitude for leaky ReLU output (default: None). + flip_filter: False = convolution, True = correlation (default: False). 
+ impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0) + return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): + """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using + existing `upfirdn2n()` and `bias_act()` ops. + """ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + fu_w, fu_h = _get_filter_size(fu) + fd_w, fd_h = _get_filter_size(fd) + if b is not None: + assert isinstance(b, torch.Tensor) and b.dtype == x.dtype + misc.assert_shape(b, [x.shape[1]]) + assert isinstance(up, int) and up >= 1 + assert isinstance(down, int) and down >= 1 + px0, px1, py0, py1 = _parse_padding(padding) + assert gain == float(gain) and gain > 0 + assert slope == float(slope) and slope >= 0 + assert clamp is None or (clamp == float(clamp) and clamp >= 0) + + # Calculate output size. + batch_size, channels, in_h, in_w = x.shape + in_dtype = x.dtype + out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down + out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down + + # Compute using existing ops. + x = bias_act.bias_act(x=x, b=b) # Apply bias. + x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. + x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp. + x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample. + + # Check output shape & dtype. + misc.assert_shape(x, [batch_size, channels, out_h, out_w]) + assert x.dtype == in_dtype + return x + +#---------------------------------------------------------------------------- + +_filtered_lrelu_cuda_cache = dict() + +def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): + """Fast CUDA implementation of `filtered_lrelu()` using custom ops. + """ + assert isinstance(up, int) and up >= 1 + assert isinstance(down, int) and down >= 1 + px0, px1, py0, py1 = _parse_padding(padding) + assert gain == float(gain) and gain > 0 + gain = float(gain) + assert slope == float(slope) and slope >= 0 + slope = float(slope) + assert clamp is None or (clamp == float(clamp) and clamp >= 0) + clamp = float(clamp if clamp is not None else 'inf') + + # Lookup from cache. + key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter) + if key in _filtered_lrelu_cuda_cache: + return _filtered_lrelu_cuda_cache[key] + + # Forward op. + class FilteredLReluCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + + # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable). 
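The public `filtered_lrelu()` wrapper defined earlier in this file is the intended entry point for the fused op. A minimal usage sketch, assuming the repository root is importable; the shapes, filter taps, and padding are illustrative, with the padding chosen so the output resolution matches the input per the size formula in `_filtered_lrelu_ref()`:

import torch
from torch_utils.ops import filtered_lrelu, upfirdn2d

device = 'cuda' if torch.cuda.is_available() else 'cpu'
x = torch.randn(1, 4, 16, 16, device=device)
# setup_filter() expands this 4-tap list into a 4x4 non-separable kernel.
f = upfirdn2d.setup_filter([1, 3, 3, 1], device=device)

# up=2, down=2, 4x4 filters: out = (16*2 + 3+3 - 3 - 3 + 1) // 2 = 16.
y = filtered_lrelu.filtered_lrelu(x, fu=f, fd=f, up=2, down=2, padding=3, clamp=256)
print(y.shape)  # torch.Size([1, 4, 16, 16])

On a CUDA device this resolves to the compiled plugin; on CPU (or with impl='ref') it falls back to the reference path built from `upfirdn2d()` and `bias_act()`.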
+ if fu is None: + fu = torch.ones([1, 1], dtype=torch.float32, device=x.device) + if fd is None: + fd = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert 1 <= fu.ndim <= 2 + assert 1 <= fd.ndim <= 2 + + # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1. + if up == 1 and fu.ndim == 1 and fu.shape[0] == 1: + fu = fu.square()[None] + if down == 1 and fd.ndim == 1 and fd.shape[0] == 1: + fd = fd.square()[None] + + # Missing sign input tensor. + if si is None: + si = torch.empty([0]) + + # Missing bias tensor. + if b is None: + b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device) + + # Construct internal sign tensor only if gradients are needed. + write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad) + + # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout. + strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1] + if any(a < b for a, b in zip(strides[:-1], strides[1:])): + warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning) + + # Call C++/Cuda plugin if datatype is supported. + if x.dtype in [torch.float16, torch.float32]: + if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device): + warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning) + y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs) + else: + return_code = -1 + + # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because + # only the bit-packed sign tensor is retained for gradient computation. + if return_code < 0: + warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning) + + y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias. + y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. + so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place. + y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample. + + # Prepare for gradient computation. 
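+            # Only the filter taps and the (possibly freshly written) bit-packed sign
+            # tensor are saved; the input activations themselves are not needed to
+            # reconstruct the gradient.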
+ ctx.save_for_backward(fu, fd, (si if si.numel() else so)) + ctx.x_shape = x.shape + ctx.y_shape = y.shape + ctx.s_ofs = sx, sy + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + fu, fd, si = ctx.saved_tensors + _, _, xh, xw = ctx.x_shape + _, _, yh, yw = ctx.y_shape + sx, sy = ctx.s_ofs + dx = None # 0 + dfu = None; assert not ctx.needs_input_grad[1] + dfd = None; assert not ctx.needs_input_grad[2] + db = None # 3 + dsi = None; assert not ctx.needs_input_grad[4] + dsx = None; assert not ctx.needs_input_grad[5] + dsy = None; assert not ctx.needs_input_grad[6] + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]: + pp = [ + (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0, + xw * up - yw * down + px0 - (up - 1), + (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0, + xh * up - yh * down + py0 - (up - 1), + ] + gg = gain * (up ** 2) / (down ** 2) + ff = (not flip_filter) + sx = sx - (fu.shape[-1] - 1) + px0 + sy = sy - (fu.shape[0] - 1) + py0 + dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy) + + if ctx.needs_input_grad[3]: + db = dx.sum([0, 2, 3]) + + return dx, dfu, dfd, db, dsi, dsx, dsy + + # Add to cache. + _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda + return FilteredLReluCuda + +#---------------------------------------------------------------------------- diff --git a/torch_utils/ops/filtered_lrelu_ns.cu b/torch_utils/ops/filtered_lrelu_ns.cu new file mode 100644 index 0000000000000000000000000000000000000000..ef5d948c4fdf9cb0fe8a42f6268c61aeef6b2000 --- /dev/null +++ b/torch_utils/ops/filtered_lrelu_ns.cu @@ -0,0 +1,27 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for no signs mode (no gradients required). + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. +template cudaError_t copy_filters(cudaStream_t stream); diff --git a/torch_utils/ops/filtered_lrelu_rd.cu b/torch_utils/ops/filtered_lrelu_rd.cu new file mode 100644 index 0000000000000000000000000000000000000000..968347882e9aebd36204f67e201cd16226dd9132 --- /dev/null +++ b/torch_utils/ops/filtered_lrelu_rd.cu @@ -0,0 +1,27 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for sign read mode. + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. +template cudaError_t copy_filters(cudaStream_t stream); diff --git a/torch_utils/ops/filtered_lrelu_wr.cu b/torch_utils/ops/filtered_lrelu_wr.cu new file mode 100644 index 0000000000000000000000000000000000000000..a4c6a24aae908bc07248f7ff710cbd1a11a38bb1 --- /dev/null +++ b/torch_utils/ops/filtered_lrelu_wr.cu @@ -0,0 +1,27 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for sign write mode. + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. +template cudaError_t copy_filters(cudaStream_t stream); diff --git a/torch_utils/ops/fma.py b/torch_utils/ops/fma.py new file mode 100644 index 0000000000000000000000000000000000000000..51a45dfa0829987e8ee5214663e068cb3af2a8b9 --- /dev/null +++ b/torch_utils/ops/fma.py @@ -0,0 +1,60 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. 
Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`.""" + +import torch + +#---------------------------------------------------------------------------- + +def fma(a, b, c): # => a * b + c + return _FusedMultiplyAdd.apply(a, b, c) + +#---------------------------------------------------------------------------- + +class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c + @staticmethod + def forward(ctx, a, b, c): # pylint: disable=arguments-differ + out = torch.addcmul(c, a, b) + ctx.save_for_backward(a, b) + ctx.c_shape = c.shape + return out + + @staticmethod + def backward(ctx, dout): # pylint: disable=arguments-differ + a, b = ctx.saved_tensors + c_shape = ctx.c_shape + da = None + db = None + dc = None + + if ctx.needs_input_grad[0]: + da = _unbroadcast(dout * b, a.shape) + + if ctx.needs_input_grad[1]: + db = _unbroadcast(dout * a, b.shape) + + if ctx.needs_input_grad[2]: + dc = _unbroadcast(dout, c_shape) + + return da, db, dc + +#---------------------------------------------------------------------------- + +def _unbroadcast(x, shape): + extra_dims = x.ndim - len(shape) + assert extra_dims >= 0 + dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] + if len(dim): + x = x.sum(dim=dim, keepdim=True) + if extra_dims: + x = x.reshape(-1, *x.shape[extra_dims+1:]) + assert x.shape == shape + return x + +#---------------------------------------------------------------------------- diff --git a/torch_utils/ops/grid_sample_gradfix.py b/torch_utils/ops/grid_sample_gradfix.py new file mode 100644 index 0000000000000000000000000000000000000000..979ee831b232c68b8c271be9e376c70c57a31b02 --- /dev/null +++ b/torch_utils/ops/grid_sample_gradfix.py @@ -0,0 +1,77 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Custom replacement for `torch.nn.functional.grid_sample` that +supports arbitrarily high order gradients between the input and output. +Only works on 2D images and assumes +`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" + +import torch + +# pylint: disable=redefined-builtin +# pylint: disable=arguments-differ +# pylint: disable=protected-access + +#---------------------------------------------------------------------------- + +enabled = False # Enable the custom op by setting this to true. 
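The `enabled` flag above gates the custom op defined in the rest of this module; the point of the op is to make gradient penalties on `grid_sample()` outputs twice-differentiable. A usage sketch, assuming the repository root is on the import path; shapes and the penalty are illustrative, and the sampling grid is deliberately kept out of the autograd graph because the op does not supply second-order gradients with respect to the grid:

import torch
import torch.nn.functional as F
from torch_utils.ops import grid_sample_gradfix

grid_sample_gradfix.enabled = True

img = torch.randn(1, 3, 8, 8, requires_grad=True)
theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])            # identity transform
grid = F.affine_grid(theta, size=(1, 3, 8, 8), align_corners=False)   # not tracked by autograd

out = grid_sample_gradfix.grid_sample(img, grid)
loss = out.square().sum()

# First-order gradient w.r.t. the image, kept in the graph ...
(g,) = torch.autograd.grad(loss, img, create_graph=True)

# ... so a penalty on that gradient can itself be backpropagated (second order).
g.square().sum().backward()
print(img.grad.shape)  # torch.Size([1, 3, 8, 8])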
+ +#---------------------------------------------------------------------------- + +def grid_sample(input, grid): + if _should_use_custom_op(): + return _GridSample2dForward.apply(input, grid) + return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) + +#---------------------------------------------------------------------------- + +def _should_use_custom_op(): + return enabled + +#---------------------------------------------------------------------------- + +class _GridSample2dForward(torch.autograd.Function): + @staticmethod + def forward(ctx, input, grid): + assert input.ndim == 4 + assert grid.ndim == 4 + output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) + ctx.save_for_backward(input, grid) + return output + + @staticmethod + def backward(ctx, grad_output): + input, grid = ctx.saved_tensors + grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) + return grad_input, grad_grid + +#---------------------------------------------------------------------------- + +class _GridSample2dBackward(torch.autograd.Function): + @staticmethod + def forward(ctx, grad_output, input, grid): + op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') + grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) + ctx.save_for_backward(grid) + return grad_input, grad_grid + + @staticmethod + def backward(ctx, grad2_grad_input, grad2_grad_grid): + _ = grad2_grad_grid # unused + grid, = ctx.saved_tensors + grad2_grad_output = None + grad2_input = None + grad2_grid = None + + if ctx.needs_input_grad[0]: + grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) + + assert not ctx.needs_input_grad[2] + return grad2_grad_output, grad2_input, grad2_grid + +#---------------------------------------------------------------------------- diff --git a/torch_utils/ops/upfirdn2d.cpp b/torch_utils/ops/upfirdn2d.cpp new file mode 100644 index 0000000000000000000000000000000000000000..44fa337d8d4c34dfa010a59cd27d86857db671aa --- /dev/null +++ b/torch_utils/ops/upfirdn2d.cpp @@ -0,0 +1,107 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include +#include +#include +#include "upfirdn2d.h" + +//------------------------------------------------------------------------ + +static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain) +{ + // Validate arguments. 
+ TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x"); + TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32"); + TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); + TORCH_CHECK(f.numel() <= INT_MAX, "f is too large"); + TORCH_CHECK(x.numel() > 0, "x has zero size"); + TORCH_CHECK(f.numel() > 0, "f has zero size"); + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(f.dim() == 2, "f must be rank 2"); + TORCH_CHECK((x.size(0)-1)*x.stride(0) + (x.size(1)-1)*x.stride(1) + (x.size(2)-1)*x.stride(2) + (x.size(3)-1)*x.stride(3) <= INT_MAX, "x memory footprint is too large"); + TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1"); + TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1"); + TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1"); + + // Create output tensor. + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx; + int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy; + TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1"); + torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format()); + TORCH_CHECK(y.numel() <= INT_MAX, "output is too large"); + TORCH_CHECK((y.size(0)-1)*y.stride(0) + (y.size(1)-1)*y.stride(1) + (y.size(2)-1)*y.stride(2) + (y.size(3)-1)*y.stride(3) <= INT_MAX, "output memory footprint is too large"); + + // Initialize CUDA kernel parameters. + upfirdn2d_kernel_params p; + p.x = x.data_ptr(); + p.f = f.data_ptr(); + p.y = y.data_ptr(); + p.up = make_int2(upx, upy); + p.down = make_int2(downx, downy); + p.pad0 = make_int2(padx0, pady0); + p.flip = (flip) ? 1 : 0; + p.gain = gain; + p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0)); + p.filterSize = make_int2((int)f.size(1), (int)f.size(0)); + p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0)); + p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); + p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0)); + p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z; + p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1; + + // Choose CUDA kernel. + upfirdn2d_kernel_spec spec; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] + { + spec = choose_upfirdn2d_kernel(p); + }); + + // Set looping options. + p.loopMajor = (p.sizeMajor - 1) / 16384 + 1; + p.loopMinor = spec.loopMinor; + p.loopX = spec.loopX; + p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1; + p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1; + + // Compute grid size. + dim3 blockSize, gridSize; + if (spec.tileOutW < 0) // large + { + blockSize = dim3(4, 32, 1); + gridSize = dim3( + ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor, + (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, + p.launchMajor); + } + else // small + { + blockSize = dim3(256, 1, 1); + gridSize = dim3( + ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor, + (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, + p.launchMajor); + } + + // Launch CUDA kernel. 
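+    // The kernel selected above (spec.kernel) is launched with the grid/block sizes just
+    // computed; all parameters travel in the single struct p referenced through args.
+    // No dynamic shared memory is requested, hence the 0 in the launch call.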
+ void* args[] = {&p}; + AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); + return y; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("upfirdn2d", &upfirdn2d); +} + +//------------------------------------------------------------------------ diff --git a/torch_utils/ops/upfirdn2d.cu b/torch_utils/ops/upfirdn2d.cu new file mode 100644 index 0000000000000000000000000000000000000000..3a33e31bbb1bbc1cd02ee7d2ede3943917f3906e --- /dev/null +++ b/torch_utils/ops/upfirdn2d.cu @@ -0,0 +1,384 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. + +#include +#include "upfirdn2d.h" + +//------------------------------------------------------------------------ +// Helpers. + +template struct InternalType; +template <> struct InternalType { typedef double scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; + +static __device__ __forceinline__ int floor_div(int a, int b) +{ + int t = 1 - a / b; + return (a + t * b) / b - t; +} + +//------------------------------------------------------------------------ +// Generic CUDA implementation for large filters. + +template static __global__ void upfirdn2d_kernel_large(upfirdn2d_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + + // Calculate thread index. + int minorBase = blockIdx.x * blockDim.x + threadIdx.x; + int outY = minorBase / p.launchMinor; + minorBase -= outY * p.launchMinor; + int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y; + int majorBase = blockIdx.z * p.loopMajor; + if (outXBase >= p.outSize.x | outY >= p.outSize.y | majorBase >= p.sizeMajor) + return; + + // Setup Y receptive field. + int midY = outY * p.down.y + p.up.y - 1 - p.pad0.y; + int inY = min(max(floor_div(midY, p.up.y), 0), p.inSize.y); + int h = min(max(floor_div(midY + p.filterSize.y, p.up.y), 0), p.inSize.y) - inY; + int filterY = midY + p.filterSize.y - (inY + 1) * p.up.y; + if (p.flip) + filterY = p.filterSize.y - 1 - filterY; + + // Loop over major, minor, and X. + for (int majorIdx = 0, major = majorBase; majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) + for (int minorIdx = 0, minor = minorBase; minorIdx < p.loopMinor & minor < p.sizeMinor; minorIdx++, minor += p.launchMinor) + { + int nc = major * p.sizeMinor + minor; + int n = nc / p.inSize.z; + int c = nc - n * p.inSize.z; + for (int loopX = 0, outX = outXBase; loopX < p.loopX & outX < p.outSize.x; loopX++, outX += blockDim.y) + { + // Setup X receptive field. + int midX = outX * p.down.x + p.up.x - 1 - p.pad0.x; + int inX = min(max(floor_div(midX, p.up.x), 0), p.inSize.x); + int w = min(max(floor_div(midX + p.filterSize.x, p.up.x), 0), p.inSize.x) - inX; + int filterX = midX + p.filterSize.x - (inX + 1) * p.up.x; + if (p.flip) + filterX = p.filterSize.x - 1 - filterX; + + // Initialize pointers. 
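The receptive-field indices above (`inX`, `inY`) are computed with the `floor_div()` helper from the top of this file rather than plain `/`: C integer division truncates toward zero (`-3 / 2 == -1`), whereas the index math needs true floor division (`floor_div(-3, 2) == -2`) once the padded coordinates go negative. A standalone check of the same arithmetic, in plain Python with illustrative names:

def c_div(a, b):
    # C-style integer division: truncates toward zero.
    q = abs(a) // abs(b)
    return q if (a >= 0) == (b >= 0) else -q

def floor_div(a, b):
    # Same arithmetic as the CUDA helper, expressed with C-style division (b > 0).
    t = 1 - c_div(a, b)
    return c_div(a + t * b, b) - t

for a in range(-12, 13):
    for b in (1, 2, 3, 4):
        assert floor_div(a, b) == a // b   # Python's // is already floor division.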
+ const T* xp = &((const T*)p.x)[inX * p.inStride.x + inY * p.inStride.y + c * p.inStride.z + n * p.inStride.w]; + const float* fp = &p.f[filterX * p.filterStride.x + filterY * p.filterStride.y]; + int filterStepX = ((p.flip) ? p.up.x : -p.up.x) * p.filterStride.x; + int filterStepY = ((p.flip) ? p.up.y : -p.up.y) * p.filterStride.y; + + // Inner loop. + scalar_t v = 0; + for (int y = 0; y < h; y++) + { + for (int x = 0; x < w; x++) + { + v += (scalar_t)(*xp) * (scalar_t)(*fp); + xp += p.inStride.x; + fp += filterStepX; + } + xp += p.inStride.y - w * p.inStride.x; + fp += filterStepY - w * filterStepX; + } + + // Store result. + v *= p.gain; + ((T*)p.y)[outX * p.outStride.x + outY * p.outStride.y + c * p.outStride.z + n * p.outStride.w] = (T)v; + } + } +} + +//------------------------------------------------------------------------ +// Specialized CUDA implementation for small filters. + +template +static __global__ void upfirdn2d_kernel_small(upfirdn2d_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + const int tileInW = ((tileOutW - 1) * downx + filterW - 1) / upx + 1; + const int tileInH = ((tileOutH - 1) * downy + filterH - 1) / upy + 1; + __shared__ volatile scalar_t sf[filterH][filterW]; + __shared__ volatile scalar_t sx[tileInH][tileInW][loopMinor]; + + // Calculate tile index. + int minorBase = blockIdx.x; + int tileOutY = minorBase / p.launchMinor; + minorBase -= tileOutY * p.launchMinor; + minorBase *= loopMinor; + tileOutY *= tileOutH; + int tileOutXBase = blockIdx.y * p.loopX * tileOutW; + int majorBase = blockIdx.z * p.loopMajor; + if (tileOutXBase >= p.outSize.x | tileOutY >= p.outSize.y | majorBase >= p.sizeMajor) + return; + + // Load filter (flipped). + for (int tapIdx = threadIdx.x; tapIdx < filterH * filterW; tapIdx += blockDim.x) + { + int fy = tapIdx / filterW; + int fx = tapIdx - fy * filterW; + scalar_t v = 0; + if (fx < p.filterSize.x & fy < p.filterSize.y) + { + int ffx = (p.flip) ? fx : p.filterSize.x - 1 - fx; + int ffy = (p.flip) ? fy : p.filterSize.y - 1 - fy; + v = (scalar_t)p.f[ffx * p.filterStride.x + ffy * p.filterStride.y]; + } + sf[fy][fx] = v; + } + + // Loop over major and X. + for (int majorIdx = 0, major = majorBase; majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) + { + int baseNC = major * p.sizeMinor + minorBase; + int n = baseNC / p.inSize.z; + int baseC = baseNC - n * p.inSize.z; + for (int loopX = 0, tileOutX = tileOutXBase; loopX < p.loopX & tileOutX < p.outSize.x; loopX++, tileOutX += tileOutW) + { + // Load input pixels. + int tileMidX = tileOutX * downx + upx - 1 - p.pad0.x; + int tileMidY = tileOutY * downy + upy - 1 - p.pad0.y; + int tileInX = floor_div(tileMidX, upx); + int tileInY = floor_div(tileMidY, upy); + __syncthreads(); + for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW * loopMinor; inIdx += blockDim.x) + { + int relC = inIdx; + int relInX = relC / loopMinor; + int relInY = relInX / tileInW; + relC -= relInX * loopMinor; + relInX -= relInY * tileInW; + int c = baseC + relC; + int inX = tileInX + relInX; + int inY = tileInY + relInY; + scalar_t v = 0; + if (inX >= 0 & inY >= 0 & inX < p.inSize.x & inY < p.inSize.y & c < p.inSize.z) + v = (scalar_t)((const T*)p.x)[inX * p.inStride.x + inY * p.inStride.y + c * p.inStride.z + n * p.inStride.w]; + sx[relInY][relInX][relC] = v; + } + + // Loop over output pixels. 
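+            // (The barrier below ensures every thread has finished filling its share of the
+            // shared-memory input tile loaded above before any thread starts reading from it.)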
+ __syncthreads(); + for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW * loopMinor; outIdx += blockDim.x) + { + int relC = outIdx; + int relOutX = relC / loopMinor; + int relOutY = relOutX / tileOutW; + relC -= relOutX * loopMinor; + relOutX -= relOutY * tileOutW; + int c = baseC + relC; + int outX = tileOutX + relOutX; + int outY = tileOutY + relOutY; + + // Setup receptive field. + int midX = tileMidX + relOutX * downx; + int midY = tileMidY + relOutY * downy; + int inX = floor_div(midX, upx); + int inY = floor_div(midY, upy); + int relInX = inX - tileInX; + int relInY = inY - tileInY; + int filterX = (inX + 1) * upx - midX - 1; // flipped + int filterY = (inY + 1) * upy - midY - 1; // flipped + + // Inner loop. + if (outX < p.outSize.x & outY < p.outSize.y & c < p.outSize.z) + { + scalar_t v = 0; + #pragma unroll + for (int y = 0; y < filterH / upy; y++) + #pragma unroll + for (int x = 0; x < filterW / upx; x++) + v += sx[relInY + y][relInX + x][relC] * sf[filterY + y * upy][filterX + x * upx]; + v *= p.gain; + ((T*)p.y)[outX * p.outStride.x + outY * p.outStride.y + c * p.outStride.z + n * p.outStride.w] = (T)v; + } + } + } + } +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p) +{ + int s = p.inStride.z, fx = p.filterSize.x, fy = p.filterSize.y; + upfirdn2d_kernel_spec spec = {(void*)upfirdn2d_kernel_large, -1,-1,1, 4}; // contiguous + if (s == 1) spec = {(void*)upfirdn2d_kernel_large, -1,-1,4, 1}; // channels_last + + // No up/downsampling. + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 7 && fy <= 7 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 5 && fy <= 5 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 3 && fy <= 3 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 24 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 7 && fy <= 7 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 5 && fy <= 5 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 3 && fy <= 3 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 24 && fy <= 
1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 16 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 8 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 2x upsampling. + if (p.up.x == 2 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + } + if (p.up.x == 2 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + } + if (p.up.x == 1 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 2x downsampling. 
+ if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 2) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 16,16,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 16,16,1, 1}; + if (s == 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + if (s == 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + if (s == 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 2) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + } + + // 4x upsampling. 
+ if (p.up.x == 4 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 32 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 32 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + } + if (p.up.x == 4 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + } + if (p.up.x == 1 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 4x downsampling (inefficient). + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 4 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,1,8, 1}; + if (s == 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,1,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 4) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 1,32,8, 1}; + if (s == 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 1,32,8, 1}; + } + return spec; +} + +//------------------------------------------------------------------------ +// Template specializations. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel (const upfirdn2d_kernel_params& p); +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel (const upfirdn2d_kernel_params& p); +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/torch_utils/ops/upfirdn2d.h b/torch_utils/ops/upfirdn2d.h new file mode 100644 index 0000000000000000000000000000000000000000..2793daf874492af01e8634a7863c036e17b6731f --- /dev/null +++ b/torch_utils/ops/upfirdn2d.h @@ -0,0 +1,59 @@ +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// NVIDIA CORPORATION and its licensors retain all intellectual property +// and proprietary rights in and to this software, related documentation +// and any modifications thereto. Any use, reproduction, disclosure or +// distribution of this software and related documentation without an express +// license agreement from NVIDIA CORPORATION is strictly prohibited. 
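All of the kernels above implement the same pad → zero-insert upsample → FIR filter → downsample primitive that the header below parameterizes. A minimal 1-D NumPy sketch of that computation; `upfirdn_1d` and its arguments are illustrative and not part of the codebase:

import numpy as np

def upfirdn_1d(x, f, up=1, down=1, pad=(0, 0)):
    y = np.zeros(len(x) * up, dtype=np.float64)
    y[::up] = x                          # insert up-1 zeros after each sample
    y = np.pad(y, pad)                   # zero-pad each side (the real op also allows negative = crop)
    y = np.convolve(y, f, mode='valid')  # FIR filter, keeping only the fully-overlapping part
    return y[::down]                     # keep every down-th sample

x = np.arange(8, dtype=np.float64)
f = np.array([0.25, 0.5, 0.25])
y = upfirdn_1d(x, f, up=2, down=2, pad=(1, 1))
print(len(y))  # 8 == (8*2 + 1 + 1 - 3 + 2) // 2, matching the outW formula in upfirdn2d.cpp.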
+ +#include + +//------------------------------------------------------------------------ +// CUDA kernel parameters. + +struct upfirdn2d_kernel_params +{ + const void* x; + const float* f; + void* y; + + int2 up; + int2 down; + int2 pad0; + int flip; + float gain; + + int4 inSize; // [width, height, channel, batch] + int4 inStride; + int2 filterSize; // [width, height] + int2 filterStride; + int4 outSize; // [width, height, channel, batch] + int4 outStride; + int sizeMinor; + int sizeMajor; + + int loopMinor; + int loopMajor; + int loopX; + int launchMinor; + int launchMajor; +}; + +//------------------------------------------------------------------------ +// CUDA kernel specialization. + +struct upfirdn2d_kernel_spec +{ + void* kernel; + int tileOutW; + int tileOutH; + int loopMinor; + int loopX; +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/torch_utils/ops/upfirdn2d.py b/torch_utils/ops/upfirdn2d.py new file mode 100644 index 0000000000000000000000000000000000000000..b544be1d52e97bfc02e59d08c30c6ddbb69bdbde --- /dev/null +++ b/torch_utils/ops/upfirdn2d.py @@ -0,0 +1,389 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Custom PyTorch ops for efficient resampling of 2D images.""" + +import os +import numpy as np +import torch + +from .. import custom_ops +from .. import misc +from . import conv2d_gradfix + +#---------------------------------------------------------------------------- + +_plugin = None + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='upfirdn2d_plugin', + sources=['upfirdn2d.cpp', 'upfirdn2d.cu'], + headers=['upfirdn2d.h'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], + ) + return True + +def _parse_scaling(scaling): + if isinstance(scaling, int): + scaling = [scaling, scaling] + assert isinstance(scaling, (list, tuple)) + assert all(isinstance(x, int) for x in scaling) + sx, sy = scaling + assert sx >= 1 and sy >= 1 + return sx, sy + +def _parse_padding(padding): + if isinstance(padding, int): + padding = [padding, padding] + assert isinstance(padding, (list, tuple)) + assert all(isinstance(x, int) for x in padding) + if len(padding) == 2: + padx, pady = padding + padding = [padx, padx, pady, pady] + padx0, padx1, pady0, pady1 = padding + return padx0, padx1, pady0, pady1 + +def _get_filter_size(f): + if f is None: + return 1, 1 + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + fw = f.shape[-1] + fh = f.shape[0] + with misc.suppress_tracer_warnings(): + fw = int(fw) + fh = int(fh) + misc.assert_shape(f, [fh, fw][:f.ndim]) + assert fw >= 1 and fh >= 1 + return fw, fh + +#---------------------------------------------------------------------------- + +def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): + r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. 
+ + Args: + f: Torch tensor, numpy array, or python list of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), + `[]` (impulse), or + `None` (identity). + device: Result device (default: cpu). + normalize: Normalize the filter so that it retains the magnitude + for constant input signal (DC)? (default: True). + flip_filter: Flip the filter? (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + separable: Return a separable filter? (default: select automatically). + + Returns: + Float32 tensor of the shape + `[filter_height, filter_width]` (non-separable) or + `[filter_taps]` (separable). + """ + # Validate. + if f is None: + f = 1 + f = torch.as_tensor(f, dtype=torch.float32) + assert f.ndim in [0, 1, 2] + assert f.numel() > 0 + if f.ndim == 0: + f = f[np.newaxis] + + # Separable? + if separable is None: + separable = (f.ndim == 1 and f.numel() >= 8) + if f.ndim == 1 and not separable: + f = f.ger(f) + assert f.ndim == (1 if separable else 2) + + # Apply normalize, flip, gain, and device. + if normalize: + f /= f.sum() + if flip_filter: + f = f.flip(list(range(f.ndim))) + f = f * (gain ** (f.ndim / 2)) + f = f.to(device=device) + return f + +#---------------------------------------------------------------------------- + +def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Pad, upsample, filter, and downsample a batch of 2D images. + + Performs the following sequence of operations for each channel: + + 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). + + 2. Pad the image with the specified number of zeros on each side (`padding`). + Negative padding corresponds to cropping the image. + + 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it + so that the footprint of all output pixels lies within the input image. + + 4. Downsample the image by keeping every Nth pixel (`down`). + + This sequence of operations bears close resemblance to scipy.signal.upfirdn(). + The fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports gradients of arbitrary order. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
+ """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) + return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): + """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. + """ + # Validate arguments. + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + assert f.dtype == torch.float32 and not f.requires_grad + batch_size, num_channels, in_height, in_width = x.shape + upx, upy = _parse_scaling(up) + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + + # Check that upsampled buffer is not smaller than the filter. + upW = in_width * upx + padx0 + padx1 + upH = in_height * upy + pady0 + pady1 + assert upW >= f.shape[-1] and upH >= f.shape[0] + + # Upsample by inserting zeros. + x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) + x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) + x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) + + # Pad or crop. + x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) + x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] + + # Setup filter. + f = f * (gain ** (f.ndim / 2)) + f = f.to(x.dtype) + if not flip_filter: + f = f.flip(list(range(f.ndim))) + + # Convolve with the filter. + f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) + if f.ndim == 4: + x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) + else: + x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) + x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) + + # Downsample by throwing away pixels. + x = x[:, :, ::downy, ::downx] + return x + +#---------------------------------------------------------------------------- + +_upfirdn2d_cuda_cache = dict() + +def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): + """Fast CUDA implementation of `upfirdn2d()` using custom ops. + """ + # Parse arguments. + upx, upy = _parse_scaling(up) + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + + # Lookup from cache. + key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) + if key in _upfirdn2d_cuda_cache: + return _upfirdn2d_cuda_cache[key] + + # Forward op. + class Upfirdn2dCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, f): # pylint: disable=arguments-differ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + if f.ndim == 1 and f.shape[0] == 1: + f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1. 
+ assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + y = x + if f.ndim == 2: + y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) + else: + y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0) + y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain) + ctx.save_for_backward(f) + ctx.x_shape = x.shape + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + f, = ctx.saved_tensors + _, _, ih, iw = ctx.x_shape + _, _, oh, ow = dy.shape + fw, fh = _get_filter_size(f) + p = [ + fw - padx0 - 1, + iw * upx - ow * downx + padx0 - upx + 1, + fh - pady0 - 1, + ih * upy - oh * downy + pady0 - upy + 1, + ] + dx = None + df = None + + if ctx.needs_input_grad[0]: + dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) + + assert not ctx.needs_input_grad[1] + return dx, df + + # Add to cache. + _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda + return Upfirdn2dCuda + +#---------------------------------------------------------------------------- + +def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Filter a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape matches the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + padding: Padding with respect to the output. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + fw // 2, + padx1 + (fw - 1) // 2, + pady0 + fh // 2, + pady1 + (fh - 1) // 2, + ] + return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) + +#---------------------------------------------------------------------------- + +def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Upsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a multiple of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the output. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). 
+ flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + upx, upy = _parse_scaling(up) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw + upx - 1) // 2, + padx1 + (fw - upx) // 2, + pady0 + (fh + upy - 1) // 2, + pady1 + (fh - upy) // 2, + ] + return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) + +#---------------------------------------------------------------------------- + +def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Downsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a fraction of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the input. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw - downx + 1) // 2, + padx1 + (fw - downx) // 2, + pady0 + (fh - downy + 1) // 2, + pady1 + (fh - downy) // 2, + ] + return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) + +#---------------------------------------------------------------------------- diff --git a/torch_utils/persistence.py b/torch_utils/persistence.py new file mode 100644 index 0000000000000000000000000000000000000000..f90ce85e8ace0f44e839158b22c5790de448d82d --- /dev/null +++ b/torch_utils/persistence.py @@ -0,0 +1,251 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Facilities for pickling Python code alongside other data. + +The pickled code is automatically imported into a separate Python module +during unpickling. 
This way, any previously exported pickles will remain +usable even if the original code is no longer available, or if the current +version of the code is not consistent with what was originally pickled.""" + +import sys +import pickle +import io +import inspect +import copy +import uuid +import types +import dnnlib + +#---------------------------------------------------------------------------- + +_version = 6 # internal version number +_decorators = set() # {decorator_class, ...} +_import_hooks = [] # [hook_function, ...] +_module_to_src_dict = dict() # {module: src, ...} +_src_to_module_dict = dict() # {src: module, ...} + +#---------------------------------------------------------------------------- + +def persistent_class(orig_class): + r"""Class decorator that extends a given class to save its source code + when pickled. + + Example: + + from torch_utils import persistence + + @persistence.persistent_class + class MyNetwork(torch.nn.Module): + def __init__(self, num_inputs, num_outputs): + super().__init__() + self.fc = MyLayer(num_inputs, num_outputs) + ... + + @persistence.persistent_class + class MyLayer(torch.nn.Module): + ... + + When pickled, any instance of `MyNetwork` and `MyLayer` will save its + source code alongside other internal state (e.g., parameters, buffers, + and submodules). This way, any previously exported pickle will remain + usable even if the class definitions have been modified or are no + longer available. + + The decorator saves the source code of the entire Python module + containing the decorated class. It does *not* save the source code of + any imported modules. Thus, the imported modules must be available + during unpickling, also including `torch_utils.persistence` itself. + + It is ok to call functions defined in the same module from the + decorated class. However, if the decorated class depends on other + classes defined in the same module, they must be decorated as well. + This is illustrated in the above example in the case of `MyLayer`. + + It is also possible to employ the decorator just-in-time before + calling the constructor. For example: + + cls = MyLayer + if want_to_make_it_persistent: + cls = persistence.persistent_class(cls) + layer = cls(num_inputs, num_outputs) + + As an additional feature, the decorator also keeps track of the + arguments that were used to construct each instance of the decorated + class. The arguments can be queried via `obj.init_args` and + `obj.init_kwargs`, and they are automatically pickled alongside other + object state. 
A typical use case is to first unpickle a previous + instance of a persistent class, and then upgrade it to use the latest + version of the source code: + + with open('old_pickle.pkl', 'rb') as f: + old_net = pickle.load(f) + new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs) + misc.copy_params_and_buffers(old_net, new_net, require_all=True) + """ + assert isinstance(orig_class, type) + if is_persistent(orig_class): + return orig_class + + assert orig_class.__module__ in sys.modules + orig_module = sys.modules[orig_class.__module__] + orig_module_src = _module_to_src(orig_module) + + class Decorator(orig_class): + _orig_module_src = orig_module_src + _orig_class_name = orig_class.__name__ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._init_args = copy.deepcopy(args) + self._init_kwargs = copy.deepcopy(kwargs) + assert orig_class.__name__ in orig_module.__dict__ + _check_pickleable(self.__reduce__()) + + @property + def init_args(self): + return copy.deepcopy(self._init_args) + + @property + def init_kwargs(self): + return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs)) + + def __reduce__(self): + fields = list(super().__reduce__()) + fields += [None] * max(3 - len(fields), 0) + if fields[0] is not _reconstruct_persistent_obj: + meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2]) + fields[0] = _reconstruct_persistent_obj # reconstruct func + fields[1] = (meta,) # reconstruct args + fields[2] = None # state dict + return tuple(fields) + + Decorator.__name__ = orig_class.__name__ + _decorators.add(Decorator) + return Decorator + +#---------------------------------------------------------------------------- + +def is_persistent(obj): + r"""Test whether the given object or class is persistent, i.e., + whether it will save its source code when pickled. + """ + try: + if obj in _decorators: + return True + except TypeError: + pass + return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck + +#---------------------------------------------------------------------------- + +def import_hook(hook): + r"""Register an import hook that is called whenever a persistent object + is being unpickled. A typical use case is to patch the pickled source + code to avoid errors and inconsistencies when the API of some imported + module has changed. + + The hook should have the following signature: + + hook(meta) -> modified meta + + `meta` is an instance of `dnnlib.EasyDict` with the following fields: + + type: Type of the persistent object, e.g. `'class'`. + version: Internal version number of `torch_utils.persistence`. + module_src Original source code of the Python module. + class_name: Class name in the original Python module. + state: Internal state of the object. + + Example: + + @persistence.import_hook + def wreck_my_network(meta): + if meta.class_name == 'MyNetwork': + print('MyNetwork is being imported. I will wreck it!') + meta.module_src = meta.module_src.replace("True", "False") + return meta + """ + assert callable(hook) + _import_hooks.append(hook) + +#---------------------------------------------------------------------------- + +def _reconstruct_persistent_obj(meta): + r"""Hook that is called internally by the `pickle` module to unpickle + a persistent object. 
+ """ + meta = dnnlib.EasyDict(meta) + meta.state = dnnlib.EasyDict(meta.state) + for hook in _import_hooks: + meta = hook(meta) + assert meta is not None + + assert meta.version == _version + module = _src_to_module(meta.module_src) + + assert meta.type == 'class' + orig_class = module.__dict__[meta.class_name] + decorator_class = persistent_class(orig_class) + obj = decorator_class.__new__(decorator_class) + + setstate = getattr(obj, '__setstate__', None) + if callable(setstate): + setstate(meta.state) # pylint: disable=not-callable + else: + obj.__dict__.update(meta.state) + return obj + +#---------------------------------------------------------------------------- + +def _module_to_src(module): + r"""Query the source code of a given Python module. + """ + src = _module_to_src_dict.get(module, None) + if src is None: + src = inspect.getsource(module) + _module_to_src_dict[module] = src + _src_to_module_dict[src] = module + return src + +def _src_to_module(src): + r"""Get or create a Python module for the given source code. + """ + module = _src_to_module_dict.get(src, None) + if module is None: + module_name = "_imported_module_" + uuid.uuid4().hex + module = types.ModuleType(module_name) + sys.modules[module_name] = module + _module_to_src_dict[module] = src + _src_to_module_dict[src] = module + exec(src, module.__dict__) # pylint: disable=exec-used + return module + +#---------------------------------------------------------------------------- + +def _check_pickleable(obj): + r"""Check that the given object is pickleable, raising an exception if + it is not. This function is expected to be considerably more efficient + than actually pickling the object. + """ + def recurse(obj): + if isinstance(obj, (list, tuple, set)): + return [recurse(x) for x in obj] + if isinstance(obj, dict): + return [[recurse(x), recurse(y)] for x, y in obj.items()] + if isinstance(obj, (str, int, float, bool, bytes, bytearray)): + return None # Python primitive types are pickleable. + if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']: + return None # NumPy arrays and PyTorch tensors are pickleable. + if is_persistent(obj): + return None # Persistent objects are pickleable, by virtue of the constructor check. + return obj + with io.BytesIO() as f: + pickle.dump(recurse(obj), f) + +#---------------------------------------------------------------------------- diff --git a/torch_utils/training_stats.py b/torch_utils/training_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..5de4134f1943e7c3104bbc926b2abaf828626525 --- /dev/null +++ b/torch_utils/training_stats.py @@ -0,0 +1,268 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Facilities for reporting and collecting training statistics across +multiple processes and devices. The interface is designed to minimize +synchronization overhead as well as the amount of boilerplate in user +code.""" + +import re +import numpy as np +import torch +import dnnlib + +from . 
import misc + +#---------------------------------------------------------------------------- + +_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares] +_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction. +_counter_dtype = torch.float64 # Data type to use for the internal counters. +_rank = 0 # Rank of the current process. +_sync_device = None # Device to use for multiprocess communication. None = single-process. +_sync_called = False # Has _sync() been called yet? +_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor +_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor + +#---------------------------------------------------------------------------- + +def init_multiprocessing(rank, sync_device): + r"""Initializes `torch_utils.training_stats` for collecting statistics + across multiple processes. + + This function must be called after + `torch.distributed.init_process_group()` and before `Collector.update()`. + The call is not necessary if multi-process collection is not needed. + + Args: + rank: Rank of the current process. + sync_device: PyTorch device to use for inter-process + communication, or None to disable multi-process + collection. Typically `torch.device('cuda', rank)`. + """ + global _rank, _sync_device + assert not _sync_called + _rank = rank + _sync_device = sync_device + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def report(name, value): + r"""Broadcasts the given set of scalars to all interested instances of + `Collector`, across device and process boundaries. + + This function is expected to be extremely cheap and can be safely + called from anywhere in the training loop, loss function, or inside a + `torch.nn.Module`. + + Warning: The current implementation expects the set of unique names to + be consistent across processes. Please make sure that `report()` is + called at least once for each unique name by each process, and in the + same order. If a given process has no scalars to broadcast, it can do + `report(name, [])` (empty list). + + Args: + name: Arbitrary string specifying the name of the statistic. + Averages are accumulated separately for each unique name. + value: Arbitrary set of scalars. Can be a list, tuple, + NumPy array, PyTorch tensor, or Python scalar. + + Returns: + The same `value` that was passed in. + """ + if name not in _counters: + _counters[name] = dict() + + elems = torch.as_tensor(value) + if elems.numel() == 0: + return value + + elems = elems.detach().flatten().to(_reduce_dtype) + moments = torch.stack([ + torch.ones_like(elems).sum(), + elems.sum(), + elems.square().sum(), + ]) + assert moments.ndim == 1 and moments.shape[0] == _num_moments + moments = moments.to(_counter_dtype) + + device = moments.device + if device not in _counters[name]: + _counters[name][device] = torch.zeros_like(moments) + _counters[name][device].add_(moments) + return value + +#---------------------------------------------------------------------------- + +def report0(name, value): + r"""Broadcasts the given set of scalars by the first process (`rank = 0`), + but ignores any scalars provided by the other processes. + See `report()` for further details. 
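
A rough single-process usage sketch for `report()` together with the `Collector` class defined later in this file (not part of the diffed code); it assumes the repository root is on PYTHONPATH and skips `init_multiprocessing()` entirely.

    import torch
    from torch_utils import training_stats

    collector = training_stats.Collector(regex='Loss/.*')    # only track names matching the regex
    for step in range(100):
        fake_loss = torch.rand([8])                          # stand-in for a per-sample loss
        training_stats.report('Loss/G/main', fake_loss)      # cheap, callable from anywhere
        if (step + 1) % 50 == 0:
            collector.update()                               # fold counters into user-visible averages
            print(step + 1, collector.mean('Loss/G/main'), collector.std('Loss/G/main'))
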
+ """ + report(name, value if _rank == 0 else []) + return value + +#---------------------------------------------------------------------------- + +class Collector: + r"""Collects the scalars broadcasted by `report()` and `report0()` and + computes their long-term averages (mean and standard deviation) over + user-defined periods of time. + + The averages are first collected into internal counters that are not + directly visible to the user. They are then copied to the user-visible + state as a result of calling `update()` and can then be queried using + `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the + internal counters for the next round, so that the user-visible state + effectively reflects averages collected between the last two calls to + `update()`. + + Args: + regex: Regular expression defining which statistics to + collect. The default is to collect everything. + keep_previous: Whether to retain the previous averages if no + scalars were collected on a given round + (default: True). + """ + def __init__(self, regex='.*', keep_previous=True): + self._regex = re.compile(regex) + self._keep_previous = keep_previous + self._cumulative = dict() + self._moments = dict() + self.update() + self._moments.clear() + + def names(self): + r"""Returns the names of all statistics broadcasted so far that + match the regular expression specified at construction time. + """ + return [name for name in _counters if self._regex.fullmatch(name)] + + def update(self): + r"""Copies current values of the internal counters to the + user-visible state and resets them for the next round. + + If `keep_previous=True` was specified at construction time, the + operation is skipped for statistics that have received no scalars + since the last update, retaining their previous averages. + + This method performs a number of GPU-to-CPU transfers and one + `torch.distributed.all_reduce()`. It is intended to be called + periodically in the main training loop, typically once every + N training steps. + """ + if not self._keep_previous: + self._moments.clear() + for name, cumulative in _sync(self.names()): + if name not in self._cumulative: + self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + delta = cumulative - self._cumulative[name] + self._cumulative[name].copy_(cumulative) + if float(delta[0]) != 0: + self._moments[name] = delta + + def _get_delta(self, name): + r"""Returns the raw moments that were accumulated for the given + statistic between the last two calls to `update()`, or zero if + no scalars were collected. + """ + assert self._regex.fullmatch(name) + if name not in self._moments: + self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + return self._moments[name] + + def num(self, name): + r"""Returns the number of scalars that were accumulated for the given + statistic between the last two calls to `update()`, or zero if + no scalars were collected. + """ + delta = self._get_delta(name) + return int(delta[0]) + + def mean(self, name): + r"""Returns the mean of the scalars that were accumulated for the + given statistic between the last two calls to `update()`, or NaN if + no scalars were collected. + """ + delta = self._get_delta(name) + if int(delta[0]) == 0: + return float('nan') + return float(delta[1] / delta[0]) + + def std(self, name): + r"""Returns the standard deviation of the scalars that were + accumulated for the given statistic between the last two calls to + `update()`, or NaN if no scalars were collected. 
+ """ + delta = self._get_delta(name) + if int(delta[0]) == 0 or not np.isfinite(float(delta[1])): + return float('nan') + if int(delta[0]) == 1: + return float(0) + mean = float(delta[1] / delta[0]) + raw_var = float(delta[2] / delta[0]) + return np.sqrt(max(raw_var - np.square(mean), 0)) + + def as_dict(self): + r"""Returns the averages accumulated between the last two calls to + `update()` as an `dnnlib.EasyDict`. The contents are as follows: + + dnnlib.EasyDict( + NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT), + ... + ) + """ + stats = dnnlib.EasyDict() + for name in self.names(): + stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name)) + return stats + + def __getitem__(self, name): + r"""Convenience getter. + `collector[name]` is a synonym for `collector.mean(name)`. + """ + return self.mean(name) + +#---------------------------------------------------------------------------- + +def _sync(names): + r"""Synchronize the global cumulative counters across devices and + processes. Called internally by `Collector.update()`. + """ + if len(names) == 0: + return [] + global _sync_called + _sync_called = True + + # Collect deltas within current rank. + deltas = [] + device = _sync_device if _sync_device is not None else torch.device('cpu') + for name in names: + delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device) + for counter in _counters[name].values(): + delta.add_(counter.to(device)) + counter.copy_(torch.zeros_like(counter)) + deltas.append(delta) + deltas = torch.stack(deltas) + + # Sum deltas across ranks. + if _sync_device is not None: + torch.distributed.all_reduce(deltas) + + # Update cumulative values. + deltas = deltas.cpu() + for idx, name in enumerate(names): + if name not in _cumulative: + _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + _cumulative[name].add_(deltas[idx]) + + # Return name-value pairs. 
+ return [(name, _cumulative[name]) for name in names] + +#---------------------------------------------------------------------------- diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fc8061b45be55c8478e3771d824d811d6cbd3c5 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/utils/__pycache__/logger.cpython-38.pyc b/utils/__pycache__/logger.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb06b3011f2ad10806ac7d2073edd882ead10857 Binary files /dev/null and b/utils/__pycache__/logger.cpython-38.pyc differ diff --git a/utils/__pycache__/manipulator.cpython-38.pyc b/utils/__pycache__/manipulator.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..000f872b284ad62856d6890fe8e737c41b155198 Binary files /dev/null and b/utils/__pycache__/manipulator.cpython-38.pyc differ diff --git a/utils/logger.py b/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..e1927f476cbe68fb3a3f64750a0238068e52f9e4 --- /dev/null +++ b/utils/logger.py @@ -0,0 +1,67 @@ +# python3.7 +"""Utility functions for logging.""" + +import os +import sys +import logging + +__all__ = ['setup_logger'] + + +def setup_logger(work_dir=None, logfile_name='log.txt', logger_name='logger'): + """Sets up logger from target work directory. + + The function will sets up a logger with `DEBUG` log level. Two handlers will + be added to the logger automatically. One is the `sys.stdout` stream, with + `INFO` log level, which will print improtant messages on the screen. The other + is used to save all messages to file `$WORK_DIR/$LOGFILE_NAME`. Messages will + be added time stamp and log level before logged. + + NOTE: If `work_dir` or `logfile_name` is empty, the file stream will be + skipped. + + Args: + work_dir: The work directory. All intermediate files will be saved here. + (default: None) + logfile_name: Name of the file to save log message. (default: `log.txt`) + logger_name: Unique name for the logger. (default: `logger`) + + Returns: + A `logging.Logger` object. + + Raises: + SystemExit: If the work directory has already existed, of the logger with + specified name `logger_name` has already existed. + """ + + logger = logging.getLogger(logger_name) + # if logger.hasHandlers(): # Already existed + # raise SystemExit(f'Logger name `{logger_name}` has already been set up!\n' + # f'Please use another name, or otherwise the messages ' + # f'may be mixed between these two loggers.') + + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s") + + # Print log message with `INFO` level or above onto the screen. + sh = logging.StreamHandler(stream=sys.stdout) + sh.setLevel(logging.INFO) + sh.setFormatter(formatter) + logger.addHandler(sh) + + if not work_dir or not logfile_name: + return logger + + if os.path.exists(work_dir): + logger.warning('WARNING: Directory already exists, overwriting images') + #raise SystemExit(f'Work directory `{work_dir}` has already existed!\n' + # f'Please specify another one.') + else: + os.makedirs(work_dir) + # Save log message with all levels in log file. 
+ fh = logging.FileHandler(os.path.join(work_dir, logfile_name)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logger.addHandler(fh) + + return logger diff --git a/utils/manipulator.py b/utils/manipulator.py new file mode 100644 index 0000000000000000000000000000000000000000..051a83da1f4b5589fe76ff2e4d09e22f0fde3340 --- /dev/null +++ b/utils/manipulator.py @@ -0,0 +1,247 @@ +# python3.7 +"""Utility functions for latent codes manipulation.""" + +import numpy as np +from sklearn import svm + +from .logger import setup_logger + +__all__ = ['train_boundary', 'project_boundary', 'linear_interpolate'] + + +def train_boundary(latent_codes, + scores, + chosen_num_or_ratio=0.02, + split_ratio=0.7, + invalid_value=None, + logger=None): + """Trains boundary in latent space with offline predicted attribute scores. + + Given a collection of latent codes and the attribute scores predicted from the + corresponding images, this function will train a linear SVM by treating it as + a bi-classification problem. Basically, the samples with highest attribute + scores are treated as positive samples, while those with lowest scores as + negative. For now, the latent code can ONLY be with 1 dimension. + + NOTE: The returned boundary is with shape (1, latent_space_dim), and also + normalized with unit norm. + + Args: + latent_codes: Input latent codes as training data. + scores: Input attribute scores used to generate training labels. + chosen_num_or_ratio: How many samples will be chosen as positive (negative) + samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio * + latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio, + 0.5 * latent_codes_num)` will be used. (default: 0.02) + split_ratio: Ratio to split training and validation sets. (default: 0.7) + invalid_value: This field is used to filter out data. (default: None) + logger: Logger for recording log messages. If set as `None`, a default + logger, which prints messages from all levels to screen, will be created. + (default: None) + + Returns: + A decision boundary with type `numpy.ndarray`. + + Raises: + ValueError: If the input `latent_codes` or `scores` are with invalid format. 
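
To make the expected shapes concrete, a synthetic-data sketch (not part of the diff) that exercises the function end to end; it assumes the repository root is on PYTHONPATH, scikit-learn is installed, and NumPy is older than 1.24, since the module still uses the deprecated `np.int` alias.

    import numpy as np
    from utils.manipulator import train_boundary

    latent_codes = np.random.randn(10000, 512).astype(np.float32)    # stand-in for sampled latent codes
    scores = latent_codes[:, :1] + 0.1 * np.random.randn(10000, 1)   # fake attribute scores, shape [N, 1]
    boundary = train_boundary(latent_codes, scores, chosen_num_or_ratio=0.02)
    print(boundary.shape, np.linalg.norm(boundary))                  # (1, 512), norm close to 1.0
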
+ """ + if not logger: + logger = setup_logger(work_dir='', logger_name='train_boundary') + + if (not isinstance(latent_codes, np.ndarray) or + not len(latent_codes.shape) == 2): + raise ValueError(f'Input `latent_codes` should be with type' + f'`numpy.ndarray`, and shape [num_samples, ' + f'latent_space_dim]!') + num_samples = latent_codes.shape[0] + latent_space_dim = latent_codes.shape[1] + if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or + not scores.shape[0] == num_samples or not scores.shape[1] == 1): + raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and ' + f'shape [num_samples, 1], where `num_samples` should be ' + f'exactly same as that of input `latent_codes`!') + if chosen_num_or_ratio <= 0: + raise ValueError(f'Input `chosen_num_or_ratio` should be positive, ' + f'but {chosen_num_or_ratio} received!') + + logger.info(f'Filtering training data.') + if invalid_value is not None: + latent_codes = latent_codes[scores[:, 0] != invalid_value] + scores = scores[scores[:, 0] != invalid_value] + + logger.info(f'Sorting scores to get positive and negative samples.') + sorted_idx = np.argsort(scores, axis=0)[::-1, 0] + latent_codes = latent_codes[sorted_idx] + scores = scores[sorted_idx] + num_samples = latent_codes.shape[0] + if 0 < chosen_num_or_ratio <= 1: + chosen_num = int(num_samples * chosen_num_or_ratio) + else: + chosen_num = int(chosen_num_or_ratio) + chosen_num = min(chosen_num, num_samples // 2) + + logger.info(f'Spliting training and validation sets:') + train_num = int(chosen_num * split_ratio) + val_num = chosen_num - train_num + # Positive samples. + positive_idx = np.arange(chosen_num) + np.random.shuffle(positive_idx) + positive_train = latent_codes[:chosen_num][positive_idx[:train_num]] + positive_val = latent_codes[:chosen_num][positive_idx[train_num:]] + # Negative samples. + negative_idx = np.arange(chosen_num) + np.random.shuffle(negative_idx) + negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]] + negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]] + # Training set. + train_data = np.concatenate([positive_train, negative_train], axis=0) + train_label = np.concatenate([np.ones(train_num, dtype=np.int), + np.zeros(train_num, dtype=np.int)], axis=0) + logger.info(f' Training: {train_num} positive, {train_num} negative.') + # Validation set. + val_data = np.concatenate([positive_val, negative_val], axis=0) + val_label = np.concatenate([np.ones(val_num, dtype=np.int), + np.zeros(val_num, dtype=np.int)], axis=0) + logger.info(f' Validation: {val_num} positive, {val_num} negative.') + # Remaining set. 
+ remaining_num = num_samples - chosen_num * 2 + remaining_data = latent_codes[chosen_num:-chosen_num] + remaining_scores = scores[chosen_num:-chosen_num] + decision_value = (scores[0] + scores[-1]) / 2 + remaining_label = np.ones(remaining_num, dtype=np.int) + remaining_label[remaining_scores.ravel() < decision_value] = 0 + remaining_positive_num = np.sum(remaining_label == 1) + remaining_negative_num = np.sum(remaining_label == 0) + logger.info(f' Remaining: {remaining_positive_num} positive, ' + f'{remaining_negative_num} negative.') + + logger.info(f'Training boundary.') + clf = svm.SVC(kernel='linear') + classifier = clf.fit(train_data, train_label) + logger.info(f'Finish training.') + + if val_num: + val_prediction = classifier.predict(val_data) + correct_num = np.sum(val_label == val_prediction) + logger.info(f'Accuracy for validation set: ' + f'{correct_num} / {val_num * 2} = ' + f'{correct_num / (val_num * 2):.6f}') + + if remaining_num: + remaining_prediction = classifier.predict(remaining_data) + correct_num = np.sum(remaining_label == remaining_prediction) + logger.info(f'Accuracy for remaining set: ' + f'{correct_num} / {remaining_num} = ' + f'{correct_num / remaining_num:.6f}') + + a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32) + return a / np.linalg.norm(a) + + +def project_boundary(primal, *args): + """Projects the primal boundary onto condition boundaries. + + The function is used for conditional manipulation, where the projected vector + will be subscribed from the normal direction of the original boundary. Here, + all input boundaries are supposed to have already been normalized to unit + norm, and with same shape [1, latent_space_dim]. + + Args: + primal: The primal boundary. + *args: Other boundaries as conditions. + + Returns: + A projected boundary (also normalized to unit norm), which is orthogonal to + all condition boundaries. + + Raises: + LinAlgError: If there are more than two condition boundaries and the method fails + to find a projected boundary orthogonal to all condition boundaries. 
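
A small sketch (not part of the diff) of the single-condition case described above, assuming the repository root is on PYTHONPATH; the point is that the result stays unit-norm and becomes orthogonal to the condition boundary.

    import numpy as np
    from utils.manipulator import project_boundary

    def unit(v):
        return v / np.linalg.norm(v)

    primal = unit(np.random.randn(1, 512))     # stand-in for the boundary being edited
    cond = unit(np.random.randn(1, 512))       # stand-in for the condition boundary
    projected = project_boundary(primal, cond)
    print(np.linalg.norm(projected))           # 1.0: result is re-normalized
    print(projected.dot(cond.T).item())        # close to 0.0: orthogonal to the condition
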
+ """ + assert len(primal.shape) == 2 and primal.shape[0] == 1 + + if not args: + return primal + if len(args) == 1: + cond = args[0] + assert (len(cond.shape) == 2 and cond.shape[0] == 1 and + cond.shape[1] == primal.shape[1]) + new = primal - primal.dot(cond.T) * cond + return new / np.linalg.norm(new) + elif len(args) == 2: + cond_1 = args[0] + cond_2 = args[1] + assert (len(cond_1.shape) == 2 and cond_1.shape[0] == 1 and + cond_1.shape[1] == primal.shape[1]) + assert (len(cond_2.shape) == 2 and cond_2.shape[0] == 1 and + cond_2.shape[1] == primal.shape[1]) + primal_cond_1 = primal.dot(cond_1.T) + primal_cond_2 = primal.dot(cond_2.T) + cond_1_cond_2 = cond_1.dot(cond_2.T) + alpha = (primal_cond_1 - primal_cond_2 * cond_1_cond_2) / ( + 1 - cond_1_cond_2 ** 2 + 1e-8) + beta = (primal_cond_2 - primal_cond_1 * cond_1_cond_2) / ( + 1 - cond_1_cond_2 ** 2 + 1e-8) + new = primal - alpha * cond_1 - beta * cond_2 + return new / np.linalg.norm(new) + else: + for cond_boundary in args: + assert (len(cond_boundary.shape) == 2 and cond_boundary.shape[0] == 1 and + cond_boundary.shape[1] == primal.shape[1]) + cond_boundaries = np.squeeze(np.asarray(args)) + A = np.matmul(cond_boundaries, cond_boundaries.T) + B = np.matmul(cond_boundaries, primal.T) + x = np.linalg.solve(A, B) + new = primal - (np.matmul(x.T, cond_boundaries)) + return new / np.linalg.norm(new) + + +def linear_interpolate(latent_code, + boundary, + start_distance=-3.0, + end_distance=3.0, + steps=10): + """Manipulates the given latent code with respect to a particular boundary. + + Basically, this function takes a latent code and a boundary as inputs, and + outputs a collection of manipulated latent codes. For example, let `steps` to + be 10, then the input `latent_code` is with shape [1, latent_space_dim], input + `boundary` is with shape [1, latent_space_dim] and unit norm, the output is + with shape [10, latent_space_dim]. The first output latent code is + `start_distance` away from the given `boundary`, while the last output latent + code is `end_distance` away from the given `boundary`. Remaining latent codes + are linearly interpolated. + + Input `latent_code` can also be with shape [1, num_layers, latent_space_dim] + to support W+ space in Style GAN. In this case, all features in W+ space will + be manipulated same as each other. Accordingly, the output will be with shape + [10, num_layers, latent_space_dim]. + + NOTE: Distance is sign sensitive. + + Args: + latent_code: The input latent code for manipulation. + boundary: The semantic boundary as reference. + start_distance: The distance to the boundary where the manipulation starts. + (default: -3.0) + end_distance: The distance to the boundary where the manipulation ends. + (default: 3.0) + steps: Number of steps to move the latent code from start position to end + position. 
(default: 10) + """ + assert (latent_code.shape[0] == 1 and boundary.shape[0] == 1 and + len(boundary.shape) == 2 and + boundary.shape[1] == latent_code.shape[-1]) + + linspace = np.linspace(start_distance, end_distance, steps) + if len(latent_code.shape) == 2: + linspace = linspace - latent_code.dot(boundary.T) + linspace = linspace.reshape(-1, 1).astype(np.float32) + return latent_code + linspace * boundary + if len(latent_code.shape) == 3: + linspace = linspace.reshape(-1, 1, 1).astype(np.float32) + return latent_code + linspace * boundary.reshape(1, 1, -1) + raise ValueError(f'Input `latent_code` should be with shape ' + f'[1, latent_space_dim] or [1, N, latent_space_dim] for ' + f'W+ space in Style GAN!\n' + f'But {latent_code.shape} is received.')
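
Finally, a short end-to-end sketch (not part of the diff) of `linear_interpolate` using one of the boundary files added earlier in this change; the path and the `(1, latent_dim)`, unit-norm boundary shape are assumptions based on those files.

    import numpy as np
    from utils.manipulator import linear_interpolate

    boundary = np.load('boundaries/stylegan_ffhq/boundary_Smiling.npy').reshape(1, -1)  # assumed unit-norm row vector
    latent_code = np.random.randn(1, boundary.shape[1]).astype(np.float32)
    codes = linear_interpolate(latent_code, boundary, start_distance=-3.0, end_distance=3.0, steps=10)
    print(codes.shape)    # (10, latent_dim): one manipulated code per interpolation step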