{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ghyQbUni7v5O"
   },
   "source": [
    "# Fetch Codebase and Models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "z5tvj5uIHo_9"
   },
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "os.chdir('/content')\n",
    "CODE_DIR = 'idinvert'\n",
    "if not os.path.exists(CODE_DIR):\n",
    "  !git clone https://github.com/genforce/idinvert_pytorch.git $CODE_DIR\n",
    "os.chdir(f'./{CODE_DIR}')\n",
    "MODEL_DIR = os.path.join('models', 'pretrain')\n",
    "os.makedirs(MODEL_DIR, exist_ok=True)\n",
    "!wget https://mycuhk-my.sharepoint.com/:u:/g/personal/1155082926_link_cuhk_edu_hk/EXqix_JIEgtLl1FXI4uCkr8B5GPaiJyiLXL6cFbdcIKqEA?e=WYesel\\&download\\=1 -O $MODEL_DIR/styleganinv_ffhq256_encoder.pth  --quiet\n",
    "!wget https://mycuhk-my.sharepoint.com/:u:/g/personal/1155082926_link_cuhk_edu_hk/EbuzMQ3ZLl1AqvKJzeeBq7IBoQD-C1LfMIC8USlmOMPt3Q?e=CMXn8W\\&download\\=1 -O $MODEL_DIR/styleganinv_ffhq256_generator.pth  --quiet\n",
    "!wget https://mycuhk-my.sharepoint.com/:u:/g/personal/1155082926_link_cuhk_edu_hk/EQJUz9DInbxEnp0aomkGGzAB5b3ZZbtsOA-TXct9E4ONqA?e=smtO0T\\&download\\=1 -O $MODEL_DIR/vgg16.pth  --quiet\n",
    "!nvidia-smi\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "GPRv4j9q8ClG"
   },
   "source": [
    "# Define Utility Functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "tsvI6WP5eD-f"
   },
   "outputs": [],
   "source": [
    "# python 3.6\n",
    "\"\"\"Demo.\"\"\"\n",
    "import os\n",
    "import sys\n",
    "import io\n",
    "import bz2\n",
    "import requests\n",
    "import dlib\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import IPython.display\n",
    "import scipy.ndimage\n",
    "from google.colab import files\n",
    "from google.colab import output\n",
    "from utils.editor import manipulate\n",
    "from utils.inverter import StyleGANInverter\n",
    "from models.helper import build_generator\n",
    "\n",
    "\n",
    "LANDMARK_MODEL_NAME = 'shape_predictor_68_face_landmarks.dat'\n",
    "LANDMARK_MODEL_PATH = os.path.join(MODEL_DIR, LANDMARK_MODEL_NAME)\n",
    "LANDMARK_MODEL_URL = f'http://dlib.net/files/{LANDMARK_MODEL_NAME}.bz2'\n",
    "model_name = 'styleganinv_ffhq256'\n",
    "pre = 'examples'\n",
    "inverted_code_dir = 'inverted_codes'\n",
    "os.makedirs(inverted_code_dir, exist_ok=True)\n",
    "\n",
    "class FaceLandmarkDetector(object):\n",
    "  \"\"\"Class of face landmark detector.\"\"\"\n",
    "\n",
    "  def __init__(self, align_size=256, enable_padding=True):\n",
    "    \"\"\"Initializes face detector and landmark detector.\n",
    "\n",
    "  Args:\n",
    "    align_size: Size of the aligned face if performing face alignment.\n",
    "    (default: 256)\n",
    "    enable_padding: Whether to enable padding for face alignment (default:\n",
    "    True)\n",
    "  \"\"\"\n",
    "    # Download models if needed.\n",
    "    if not os.path.exists(LANDMARK_MODEL_PATH):\n",
    "      data = requests.get(LANDMARK_MODEL_URL)\n",
    "      data_decompressed = bz2.decompress(data.content)\n",
    "      with open(LANDMARK_MODEL_PATH, 'wb') as f:\n",
    "        f.write(data_decompressed)\n",
    "\n",
    "    self.face_detector = dlib.get_frontal_face_detector()\n",
    "    self.landmark_detector = dlib.shape_predictor(LANDMARK_MODEL_PATH)\n",
    "    self.align_size = align_size\n",
    "    self.enable_padding = enable_padding\n",
    "\n",
    "  def detect(self, image_path):\n",
    "    \"\"\"Detects landmarks from the given image.\n",
    "\n",
    "  This function will first perform face detection on the input image. All\n",
    "  detected results will be grouped into a list. If no face is detected, an\n",
    "  empty list will be returned.\n",
    "\n",
    "  For each element in the list, it is a dictionary consisting of `image_path`,\n",
    "  `bbox` and `landmarks`. `image_path` is the path to the input image. `bbox`\n",
    "  is the 4-element bounding box with order (left, top, right, bottom), and\n",
    "  `landmarks` is a list of 68 (x, y) points.\n",
    "\n",
    "  Args:\n",
    "    image_path: Path to the image to detect landmarks from.\n",
    "\n",
    "  Returns:\n",
    "    A list of dictionaries, each of which is the detection results of a\n",
    "    particular face.\n",
    "  \"\"\"\n",
    "    results = []\n",
    "\n",
    "    # image_ = np.array(image)\n",
    "    images = dlib.load_rgb_image(image_path)\n",
    "    # Face detection (1 means to upsample the image for 1 time.)\n",
    "    bboxes = self.face_detector(images, 1)\n",
    "    # Landmark detection\n",
    "    for bbox in bboxes:\n",
    "      landmarks = []\n",
    "      for point in self.landmark_detector(images, bbox).parts():\n",
    "        landmarks.append((point.x, point.y))\n",
    "      results.append({\n",
    "          'image_path': image_path,\n",
    "          'bbox': (bbox.left(), bbox.top(), bbox.right(), bbox.bottom()),\n",
    "          'landmarks': landmarks,\n",
    "      })\n",
    "    return results\n",
    "\n",
    "  def align(self, face_info):\n",
    "    \"\"\"Aligns face based on landmark detection.\n",
    "\n",
    "  The face alignment process is borrowed from\n",
    "  https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py,\n",
    "  which only supports aligning faces to square size.\n",
    "\n",
    "  Args:\n",
    "    face_info: Face information, which is the element of the list returned by\n",
    "    `self.detect()`.\n",
    "\n",
    "  Returns:\n",
    "    A `np.ndarray`, containing the aligned result. It is with `RGB` channel\n",
    "    order.\n",
    "  \"\"\"\n",
    "    img = Image.open(face_info['image_path'])\n",
    "\n",
    "    landmarks = np.array(face_info['landmarks'])\n",
    "    eye_left = np.mean(landmarks[36: 42], axis=0)\n",
    "    eye_right = np.mean(landmarks[42: 48], axis=0)\n",
    "    eye_middle = (eye_left + eye_right) / 2\n",
    "    eye_to_eye = eye_right - eye_left\n",
    "    mouth_middle = (landmarks[48] + landmarks[54]) / 2\n",
    "    eye_to_mouth = mouth_middle - eye_middle\n",
    "\n",
    "    # Choose oriented crop rectangle.\n",
    "    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]\n",
    "    x /= np.hypot(*x)\n",
    "    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)\n",
    "    y = np.flipud(x) * [-1, 1]\n",
    "    c = eye_middle + eye_to_mouth * 0.1\n",
    "    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])\n",
    "    qsize = np.hypot(*x) * 2\n",
    "\n",
    "    # Shrink.\n",
    "    shrink = int(np.floor(qsize / self.align_size * 0.5))\n",
    "    if shrink > 1:\n",
    "      rsize = (int(np.rint(float(img.size[0]) / shrink)),\n",
    "               int(np.rint(float(img.size[1]) / shrink)))\n",
    "      img = img.resize(rsize, Image.LANCZOS)\n",
    "      quad /= shrink\n",
    "      qsize /= shrink\n",
    "\n",
    "    # Crop.\n",
    "    border = max(int(np.rint(qsize * 0.1)), 3)\n",
    "    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),\n",
    "            int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))\n",
    "    crop = (max(crop[0] - border, 0),\n",
    "            max(crop[1] - border, 0),\n",
    "            min(crop[2] + border, img.size[0]),\n",
    "            min(crop[3] + border, img.size[1]))\n",
    "    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:\n",
    "      img = img.crop(crop)\n",
    "      quad -= crop[0:2]\n",
    "\n",
    "    # Pad.\n",
    "    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),\n",
    "           int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))\n",
    "    pad = (max(-pad[0] + border, 0),\n",
    "           max(-pad[1] + border, 0),\n",
    "           max(pad[2] - img.size[0] + border, 0),\n",
    "           max(pad[3] - img.size[1] + border, 0))\n",
    "    if self.enable_padding and max(pad) > border - 4:\n",
    "      pad = np.maximum(pad, int(np.rint(qsize * 0.3)))\n",
    "      img = np.pad(np.float32(img),\n",
    "                   ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)),\n",
    "                   'reflect')\n",
    "      h, w, _ = img.shape\n",
    "      y, x, _ = np.ogrid[:h, :w, :1]\n",
    "      mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],\n",
    "                                         np.float32(w - 1 - x) / pad[2]),\n",
    "                        1.0 - np.minimum(np.float32(y) / pad[1],\n",
    "                                         np.float32(h - 1 - y) / pad[3]))\n",
    "      blur = qsize * 0.02\n",
    "      blurred_image = scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img\n",
    "      img += blurred_image * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)\n",
    "      img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)\n",
    "      img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')\n",
    "      quad += pad[:2]\n",
    "\n",
    "    # Transform.\n",
    "    img = img.transform((self.align_size * 4, self.align_size * 4), Image.QUAD,\n",
    "                        (quad + 0.5).flatten(), Image.BILINEAR)\n",
    "    img = img.resize((self.align_size, self.align_size), Image.LANCZOS)\n",
    "\n",
    "    return np.array(img)\n",
    "\n",
    "\n",
    "def align_face(image_path, align_size=256):\n",
    "  \"\"\"Aligns a given face.\"\"\"\n",
    "  model = FaceLandmarkDetector(align_size)\n",
    "  face_infos = model.detect(image_path)\n",
    "  if not face_infos:\n",
    "    raise ValueError(f'No face detected in image: {image_path}')\n",
    "  face_infos = face_infos[0]\n",
    "  img = model.align(face_infos)\n",
    "  return img\n",
    "\n",
    "\n",
    "def build_inverter(model_name, iteration=100, regularization_loss_weight=2):\n",
    "  \"\"\"Builds inverter\"\"\"\n",
    "  inverter = StyleGANInverter(\n",
    "      model_name,\n",
    "      learning_rate=0.01,\n",
    "      iteration=iteration,\n",
    "      reconstruction_loss_weight=1.0,\n",
    "      perceptual_loss_weight=5e-5,\n",
    "      regularization_loss_weight=regularization_loss_weight)\n",
    "  return inverter\n",
    "\n",
    "\n",
    "def get_generator(model_name):\n",
    "  \"\"\"Gets model by name\"\"\"\n",
    "  return build_generator(model_name)\n",
    "\n",
    "\n",
    "def align(inverter, image_path):\n",
    "  \"\"\"Aligns an unloaded image.\"\"\"\n",
    "  aligned_image = align_face(image_path,\n",
    "                             align_size=inverter.G.resolution)\n",
    "  return aligned_image\n",
    "\n",
    "\n",
    "def invert(inverter, image):\n",
    "  \"\"\"Inverts an image.\"\"\"\n",
    "  latent_code, reconstruction = inverter.easy_invert(image, num_viz=1)\n",
    "  return latent_code, reconstruction\n",
    "\n",
    "\n",
    "def diffuse(inverter, target, context, left, top, width, height):\n",
    "  \"\"\"Diffuses a target image to a context image.\"\"\"\n",
    "  center_x = left + width // 2\n",
    "  center_y = top + height // 2\n",
    "  _, diffusion = inverter.easy_diffuse(target=target,\n",
    "                                       context=context,\n",
    "                                       center_x=center_x,\n",
    "                                       center_y=center_y,\n",
    "                                       crop_x=width,\n",
    "                                       crop_y=height,\n",
    "                                       num_viz=1)\n",
    "  return diffusion\n",
    "\n",
    "\n",
    "def load_image(path):\n",
    "  \"\"\"Loads an image from disk.\n",
    "\n",
    "  NOTE: The image is returned as a `PIL.Image.Image` exactly as stored on\n",
    "  disk; convert with `.convert('RGB')` and `np.asarray` if needed.\n",
    "\n",
    "  Args:\n",
    "    path: Path to load the image from.\n",
    "\n",
    "  Returns:\n",
    "    A `PIL.Image.Image` instance, or `None` if input `path` does not exist.\n",
    "  \"\"\"\n",
    "  if not os.path.isfile(path):\n",
    "    return None\n",
    "\n",
    "  image = Image.open(path)\n",
    "  return image\n",
    "\n",
    "def imshow(images, col, viz_size=256):\n",
    "  \"\"\"Shows images in one figure.\"\"\"\n",
    "  num, height, width, channels = images.shape\n",
    "  assert num % col == 0\n",
    "  row = num // col\n",
    "\n",
    "  fused_image = np.zeros((viz_size * row, viz_size * col, channels), dtype=np.uint8)\n",
    "\n",
    "  for idx, image in enumerate(images):\n",
    "    i, j = divmod(idx, col)\n",
    "    y = i * viz_size\n",
    "    x = j * viz_size\n",
    "    if height != viz_size or width != viz_size:\n",
    "      image = np.array(Image.fromarray(image).resize((viz_size, viz_size)))\n",
    "    fused_image[y:y + viz_size, x:x + viz_size] = image\n",
    "\n",
    "  fused_image = np.asarray(fused_image, dtype=np.uint8)\n",
    "  data = io.BytesIO()\n",
    "  if channels == 4:\n",
    "    Image.fromarray(fused_image).save(data, 'png')\n",
    "  elif channels == 3:\n",
    "    Image.fromarray(fused_image).save(data, 'jpeg')\n",
    "  else:\n",
    "    raise ValueError('Image channel error')\n",
    "  im_data = data.getvalue()\n",
    "  disp = IPython.display.display(IPython.display.Image(im_data))\n",
    "  return disp\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "p_X7CplrD5yG"
   },
   "source": [
    "# Semantic Diffusion"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "3AkImJ0SaCiM"
   },
   "outputs": [],
   "source": [
    "inverter = build_inverter(model_name, iteration=100,\n",
    "                          regularization_loss_weight=0)\n",
    "output.clear()\n",
    "print('Upload the target image you want to diffuse or \\\n",
    "use the default image by clicking `Cancel upload` button.')\n",
    "uploaded = files.upload()\n",
    "if uploaded:\n",
    "  target_image_name = list(uploaded.keys())[0]\n",
    "  target_image = align(inverter, target_image_name)\n",
    "  os.remove(target_image_name)\n",
    "  if target_image.shape[2] == 4:  # in case the image has four channels\n",
    "    target_image = target_image[:, :, :3]\n",
    "else:\n",
    "  target_name = '000006.png'\n",
    "  im_name = os.path.join(pre, target_name)\n",
    "  target_image = align(inverter, im_name)\n",
    "print('Target image ready!!!')\n",
    "\n",
    "print('Preparing the context images')\n",
    "context_names = ['000001.png' , '000008.png', '000018.png', '000019.png']\n",
    "context_images = []\n",
    "for img_name in context_names:\n",
    "  im_name = os.path.join(pre, img_name)\n",
    "  context_images.append(align(inverter, im_name))\n",
    "context_images = np.asarray(context_images)\n",
    "print('Both the target image and context images are ready,\\\n",
    " please use the next block to diffuse!!!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "T1_BQ1pmD-Xs"
   },
   "outputs": [],
   "source": [
    "#@title { display-mode: \"form\", run: \"auto\" }\n",
    "\n",
    "import ipywidgets as widgets\n",
    "from IPython.display import display\n",
    "\n",
    "showed_fuse = []\n",
    "showed_mask = []\n",
    "size = inverter.G.resolution\n",
    "crop_size = 100#@param {type:\"slider\", min:60, max:150, step:5}\n",
    "center_x = 145\n",
    "center_y = 125\n",
    "top = center_x - crop_size // 2\n",
    "left = center_y - crop_size // 2\n",
    "width = crop_size\n",
    "height = crop_size\n",
    "target_mask = np.ones((size, size, 1), np.uint8) * 128\n",
    "target_mask[top:top + height, left:left + width] = 255\n",
    "context_mask = np.ones((size, size, 1), np.uint8) * 255\n",
    "context_mask[top:top + height, left:left + width] = 128\n",
    "mask_aug = np.ones((size, size, 1), np.uint8) * 255\n",
    "\n",
    "showed_mask.append(np.ones((size, size, 4), np.uint8))\n",
    "for ind in range(context_images.shape[0]):\n",
    "  context_image = context_images[ind]\n",
    "  masked_context_image = np.concatenate([context_image, context_mask], axis=2)\n",
    "  showed_mask.append(masked_context_image)\n",
    "\n",
    "masked_target_image = np.concatenate([target_image, target_mask], axis=2)\n",
    "showed_fuse.append(masked_target_image)\n",
    "for ind in range(context_images.shape[0]):\n",
    "  paste_image = context_images[ind].copy()\n",
    "  paste_image[top:top + height, left:left + width] = \\\n",
    "  target_image[top:top + height, left:left + width].copy()\n",
    "  showed_fuse.append(np.concatenate([paste_image, mask_aug], axis=2))\n",
    "\n",
    "def Diffuse(a):\n",
    "  showed_fuse_ = []\n",
    "  showed_fuse_.append(masked_target_image)\n",
    "  diffused_images = diffuse(inverter,\n",
    "                            target_image,\n",
    "                            context_images,\n",
    "                            left, \n",
    "                            top,\n",
    "                            width,\n",
    "                            height)\n",
    "  for key, images in diffused_images.items():\n",
    "    diffused_image = np.concatenate([images[-1], mask_aug], axis=2)\n",
    "    showed_fuse_.append(diffused_image)\n",
    "  showed_images = np.asarray(showed_mask + showed_fuse_)\n",
    "  output.clear()\n",
    "  imshow(showed_images, col=len(showed_mask))\n",
    "\n",
    "button = widgets.Button(description=\"Start Diffuse!\")\n",
    "button.on_click(Diffuse)\n",
    "display(button)\n",
    "\n",
    "showed_images = np.asarray(showed_mask + showed_fuse)\n",
    "imshow(showed_images, col=len(showed_mask))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Smk6O6D0ECr0"
   },
   "source": [
    "# Manipulation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "JscvrP2yNMkU"
   },
   "outputs": [],
   "source": [
    "print('Building inverter')\n",
    "inverter = build_inverter(model_name=model_name)\n",
    "print('Building generator')\n",
    "generator = get_generator(model_name)\n",
    "output.clear()\n",
    "print('Please upload the image you want to manipulate or \\\n",
    "use the default images by clicking `Cancel upload` button.')\n",
    "uploaded = files.upload()\n",
    "if uploaded:\n",
    "  image_name = list(uploaded.keys())[0]\n",
    "  mani_image = align(inverter, image_name)\n",
    "  if mani_image.shape[2] == 4:\n",
    "    mani_image = mani_image[:, :, :3]\n",
    "  os.remove(image_name)\n",
    "else:\n",
    "  image_name = '000006.png'\n",
    "  im_name = os.path.join(pre, image_name)\n",
    "  mani_image = align(inverter, im_name)\n",
    "print('Image ready, starting inversion!!!')\n",
    "sys.stdout.flush()\n",
    "\n",
    "latent_code_path = os.path.join(inverted_code_dir, \n",
    "                                image_name.split('.')[0] + '.npy')\n",
    "if not os.path.exists(latent_code_path):\n",
    "  latent_code, _ = invert(inverter, mani_image)\n",
    "  np.save(latent_code_path, latent_code)\n",
    "else:\n",
    "  print('code already exists, skip inversion!!!')\n",
    "  latent_code = np.load(latent_code_path)\n",
    "\n",
    "ATTRS = ['age', 'eyeglasses', 'gender', 'pose', 'expression']\n",
    "boundaries = {}\n",
    "for attr in ATTRS:\n",
    "  boundary_path = os.path.join('./boundaries', \n",
    "                               'stylegan_ffhq256', attr + '.npy')\n",
    "  boundary_file = np.load(boundary_path, allow_pickle=True)[()]\n",
    "  boundary = boundary_file['boundary']\n",
    "  manipulate_layers = boundary_file['meta_data']['manipulate_layers']\n",
    "  boundaries[attr] = []\n",
    "  boundaries[attr].append(boundary)\n",
    "  boundaries[attr].append(manipulate_layers)\n",
    "print()\n",
    "print('Image inversion completed, please use the next block to manipulate!!!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "kiO5MD_VEBOq"
   },
   "outputs": [],
   "source": [
    "#@title { display-mode: \"form\", run: \"auto\" }\n",
    "\n",
    "age = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n",
    "eyeglasses = -0.1 #@param {type:\"slider\", min:-2.9, max:3.0, step:0.1}\n",
    "gender = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n",
    "pose = 0 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n",
    "expression = -0.1 #@param {type:\"slider\", min:-3.0, max:3.0, step:0.1}\n",
    "\n",
    "\n",
    "new_codes = latent_code.copy()\n",
    "for i, attr_name in enumerate(ATTRS):\n",
    "  manipulate_layers = boundaries[attr_name][1]\n",
    "  new_codes[:, manipulate_layers, :] += boundaries[attr_name][0][:, manipulate_layers, :] * eval(attr_name)\n",
    "\n",
    "new_images = generator.easy_synthesize(new_codes, **{'latent_space_type': 'wp'})['image']\n",
    "showed_images = np.concatenate([mani_image[np.newaxis], new_images], axis=0)\n",
    "imshow(showed_images, col=showed_images.shape[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "doBHCHIPDipG"
   },
   "source": [
    "# Interpolation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "pzrqYGlVTTon"
   },
   "outputs": [],
   "source": [
    "print('Building inverter')\n",
    "inverter = build_inverter(model_name=model_name)\n",
    "print('Building generator')\n",
    "generator = get_generator(model_name)\n",
    "output.clear()\n",
    "def linear_interpolate(src_code, dst_code, step=5):\n",
    "  \"\"\"Interpolates two latent codes linearly.\n",
    "  Args:\n",
    "    src_code: Source code, with shape [1, latent_space_dim].\n",
    "    dst_code: Target code, with shape [1, latent_space_dim].\n",
    "    step: Number of interpolation steps. (default: 5)\n",
    "  Returns:\n",
    "    Interpolated code, with shape [step, latent_space_dim].\n",
    "  \"\"\"\n",
    "  assert (len(src_code.shape) == 2 and len(dst_code.shape) == 2 and\n",
    "          src_code.shape[0] == 1 and dst_code.shape[0] == 1 and\n",
    "          src_code.shape[1] == dst_code.shape[1])\n",
    "\n",
    "  linspace = np.linspace(0.0, 1.0, step)[:, np.newaxis].astype(np.float32)\n",
    "  return src_code + linspace * (dst_code - src_code)\n",
    "\n",
    "print('Please upload the source image or \\\n",
    "use the default image by clicking `Cancel upload` button.')\n",
    "uploaded = files.upload()\n",
    "if uploaded:\n",
    "  src_image_name = list(uploaded.keys())[0]\n",
    "  src_image = align(inverter, src_image_name)\n",
    "  if src_image.shape[2] == 4:\n",
    "    src_image = src_image[:, :, :3]\n",
    "  os.remove(src_image_name)\n",
    "else:\n",
    "  src_image_name = '000008.png'\n",
    "  im_name = os.path.join(pre, src_image_name)\n",
    "  src_image = align(inverter, im_name)\n",
    "print('Source image ready!!!')\n",
    "src_code_path = os.path.join(inverted_code_dir, \n",
    "                             src_image_name.split('.')[0] + '.npy')\n",
    "\n",
    "print('Please upload the target image or \\\n",
    "use the default image by clicking `Cancel upload` button.')\n",
    "uploaded = files.upload()\n",
    "if uploaded:\n",
    "  dst_image_name = list(uploaded.keys())[0]\n",
    "  dst_image = align(inverter, dst_image_name)\n",
    "  if dst_image.shape[2] == 4:\n",
    "    dst_image = dst_image[:, :, :3]\n",
    "  os.remove(dst_image_name)\n",
    "else:\n",
    "  dst_image_name = '000013.png'\n",
    "  im_name = os.path.join(pre, dst_image_name)\n",
    "  dst_image = align(inverter, im_name)\n",
    "print('Target image ready!!!')\n",
    "sys.stdout.flush()\n",
    "dst_code_path = os.path.join(inverted_code_dir, \n",
    "                             dst_image_name.split('.')[0] + '.npy')\n",
    "\n",
    "if not os.path.exists(src_code_path):\n",
    "  src_code, _ = invert(inverter, src_image)\n",
    "  np.save(src_code_path, src_code)\n",
    "else:\n",
    "  src_code = np.load(src_code_path)\n",
    "\n",
    "\n",
    "if not os.path.exists(dst_code_path):\n",
    "  dst_code, _ = invert(inverter, dst_image)\n",
    "  np.save(dst_code_path, dst_code)\n",
    "else:\n",
    "  dst_code = np.load(dst_code_path)\n",
    "print()\n",
    "print('Both the source image and target image are inverted, \\\n",
    "please use the next block to interpolate!!!')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "PN0yDtOzddk_"
   },
   "outputs": [],
   "source": [
    "#@title { display-mode: \"form\", run: \"auto\" }\n",
    "step = 5 #@param {type:\"slider\", min:1, max:10, step:1}\n",
    "\n",
    "inter_images = []\n",
    "inter_images.insert(0, dst_image)\n",
    "inter_images.insert(-1, src_image)\n",
    "\n",
    "inter_codes = linear_interpolate(np.reshape(src_code, [1, -1]),\n",
    "                                 np.reshape(dst_code, [1, -1]),\n",
    "                                 step=step)\n",
    "inter_codes = np.reshape(inter_codes, [-1, inverter.G.num_layers, inverter.G.w_space_dim])\n",
    "inter_imgs = generator.easy_synthesize(inter_codes, **{'latent_space_type': 'wp'})['image']\n",
    "\n",
    "for ind in range(inter_imgs.shape[0]):\n",
    "  inter_images.insert(ind+1, inter_imgs[ind])\n",
    "\n",
    "inter_images = np.asarray(inter_images)\n",
    "imshow(inter_images, col=inter_images.shape[0])\n"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [
    "ghyQbUni7v5O",
    "GPRv4j9q8ClG",
    "doBHCHIPDipG"
   ],
   "name": "in-domain.ipynb",
   "private_outputs": true,
   "provenance": [],
   "toc_visible": true
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
