{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "qJDJLE3v0HNr" }, "source": [ "# Fetch Codebase" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "cellView": "form", "id": "JqiWKjpFa0ov" }, "outputs": [ { "ename": "FileNotFoundError", "evalue": "[Errno 2] No such file or directory: '/content'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", "\u001b[1;32m/home/johannes/Projects/Vision/sefa/docs/SeFa.ipynb Cell 2'\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39m#@title\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mos\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m os\u001b[39m.\u001b[39;49mchdir(\u001b[39m'\u001b[39;49m\u001b[39m/content\u001b[39;49m\u001b[39m'\u001b[39;49m)\n\u001b[1;32m 4\u001b[0m CODE_DIR \u001b[39m=\u001b[39m \u001b[39m'\u001b[39m\u001b[39msefa\u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 5\u001b[0m \u001b[39m#!git clone https://github.com/genforce/sefa.git $CODE_DIR\u001b[39;00m\n", "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/content'" ] } ], "source": [ "#@title\n", "import os\n", "CODE_DIR = 'sefa'\n", "#!git clone https://github.com/genforce/sefa.git $CODE_DIR\n", "os.chdir(f'..')" ] }, { "cell_type": "markdown", "metadata": { "id": "hQ_IXBZr8YcJ" }, "source": [ "# Define Utility Functions" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "cellView": "form", "id": "ijKTlG5GeTd3" }, "outputs": [ { "ename": "ModuleNotFoundError", "evalue": "No module named 'models'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", "\u001b[1;32m/home/johannes/Projects/Vision/sefa/docs/SeFa.ipynb Cell 4'\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mPIL\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mImage\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mtorch\u001b[39;00m\n\u001b[0;32m---> 11\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mmodels\u001b[39;00m \u001b[39mimport\u001b[39;00m parse_gan_type\n\u001b[1;32m 12\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mutils\u001b[39;00m \u001b[39mimport\u001b[39;00m to_tensor\n\u001b[1;32m 13\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mutils\u001b[39;00m \u001b[39mimport\u001b[39;00m postprocess\n", "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'models'" ] } ], "source": [ "#@title\n", "import os.path\n", "import io\n", "import IPython.display\n", "import numpy as np\n", "import cv2\n", "import PIL.Image\n", "\n", "import torch\n", "\n", "from models import parse_gan_type\n", "from utils import to_tensor\n", "from utils import postprocess\n", "from utils import load_generator\n", "from utils import factorize_weight\n", "\n", "\n", "def sample(generator, gan_type, num=1, seed=0):\n", " \"\"\"Samples latent codes.\"\"\"\n", " torch.manual_seed(seed)\n", " codes = torch.randn(num, generator.z_space_dim).cuda()\n", " if gan_type == 'pggan':\n", " codes = generator.layer0.pixel_norm(codes)\n", " elif gan_type == 'stylegan':\n", " codes = generator.mapping(codes)['w']\n", " codes = generator.truncation(codes, trunc_psi=0.7, trunc_layers=8)\n", " elif gan_type == 
'stylegan2':\n", " codes = generator.mapping(codes)['w']\n", " codes = generator.truncation(codes, trunc_psi=0.5, trunc_layers=18)\n", " codes = codes.detach().cpu().numpy()\n", " return codes\n", "\n", "\n", "def synthesize(generator, gan_type, codes):\n", " \"\"\"Synthesizes images with the give codes.\"\"\"\n", " if gan_type == 'pggan':\n", " images = generator(to_tensor(codes))['image']\n", " elif gan_type in ['stylegan', 'stylegan2']:\n", " images = generator.synthesis(to_tensor(codes))['image']\n", " images = postprocess(images)\n", " return images\n", "\n", "\n", "def imshow(images, col, viz_size=256):\n", " \"\"\"Shows images in one figure.\"\"\"\n", " num, height, width, channels = images.shape\n", " assert num % col == 0\n", " row = num // col\n", "\n", " fused_image = np.zeros((viz_size * row, viz_size * col, channels), dtype=np.uint8)\n", "\n", " for idx, image in enumerate(images):\n", " i, j = divmod(idx, col)\n", " y = i * viz_size\n", " x = j * viz_size\n", " if height != viz_size or width != viz_size:\n", " image = cv2.resize(image, (viz_size, viz_size))\n", " fused_image[y:y + viz_size, x:x + viz_size] = image\n", "\n", " fused_image = np.asarray(fused_image, dtype=np.uint8)\n", " data = io.BytesIO()\n", " PIL.Image.fromarray(fused_image).save(data, 'jpeg')\n", " im_data = data.getvalue()\n", " disp = IPython.display.display(IPython.display.Image(im_data))\n", " return disp" ] }, { "cell_type": "markdown", "metadata": { "id": "Q7gkmrVW8eR1" }, "source": [ "# Select a Model" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "NoWI4fPQ6Gnf" }, "outputs": [], "source": [ "#@title { display-mode: \"form\", run: \"auto\" }\n", "model_name = \"stylegan_animeface512\" #@param ['stylegan_animeface512', 'stylegan_car512', 'stylegan_cat256', 'pggan_celebahq1024', 'stylegan_bedroom256']\n", "\n", "generator = load_generator(model_name)\n", "gan_type = parse_gan_type(generator)" ] }, { "cell_type": "markdown", "metadata": { "id": "zDStH1O5t1KC" }, "source": [ "# Sample Latent Codes" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "qlRGKZbJt9hA" }, "outputs": [], "source": [ "#@title { display-mode: \"form\", run: \"auto\" }\n", "\n", "num_samples = 3 #@param {type:\"slider\", min:1, max:8, step:1}\n", "noise_seed = 0 #@param {type:\"slider\", min:0, max:1000, step:1}\n", "\n", "codes = sample(generator, gan_type, num_samples, noise_seed)\n", "images = synthesize(generator, gan_type, codes)\n", "imshow(images, col=num_samples)" ] }, { "cell_type": "markdown", "metadata": { "id": "MmRPN3xz8jCH" }, "source": [ "# Factorize & Edit" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ccONBF60mVir" }, "outputs": [], "source": [ "#@title { display-mode: \"form\", run: \"auto\" }\n", "\n", "layer_idx = \"0-1\" #@param ['all', '0-1', '2-5', '6-13']\n", "semantic_1 = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n", "semantic_2 = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n", "semantic_3 = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n", "semantic_4 = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n", "semantic_5 = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n", "\n", "# Fast implementation to factorize the weight by SeFa.\n", "layers, boundaries, _ = factorize_weight(generator, layer_idx)\n", "\n", "new_codes = codes.copy()\n", "for sem_idx in range(5):\n", " boundary = boundaries[sem_idx:sem_idx + 1]\n", " step = eval(f'semantic_{sem_idx + 1}')\n", " if gan_type == 
'pggan':\n", " new_codes += boundary * step\n", " elif gan_type in ['stylegan', 'stylegan2']:\n", " new_codes[:, layers, :] += boundary * step\n", "new_images = synthesize(generator, gan_type, new_codes)\n", "imshow(new_images, col=num_samples)" ] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "name": "SeFa", "provenance": [], "toc_visible": true }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.9" } }, "nbformat": 4, "nbformat_minor": 0 }