{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# HouseGAN\n",
    "![framework](assets/framework_housegan.jpg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "import numpy as np\n",
    "from importlib import reload  # `imp` is deprecated and removed in Python 3.12\n",
    "\n",
    "import torch\n",
    "import torchvision.transforms as transforms\n",
    "from torchvision.utils import save_image\n",
    "from torch.autograd import Variable\n",
    "\n",
    "# Local HouseGAN code lives under ./housegan/\n",
    "sys.path.append(\"./housegan/\")\n",
    "from models import Discriminator, Generator, compute_gradient_penalty, weights_init_normal\n",
    "from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import housegan_utils\n",
    "reload(housegan_utils)\n",
    "from housegan_utils import get_opt\n",
    "opt = get_opt()\n",
    "print(opt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): CUDA_VISIBLE_DEVICES only takes effect if set before the first\n",
    "# CUDA context is created in this process -- torch is already imported above,\n",
    "# so confirm no CUDA call ran earlier in the session.\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
    "device = torch.device(\"cuda:0\")\n",
    "# WGAN-GP gradient-penalty weight (the penalty term itself is commented out below)\n",
    "lambda_gp = 10\n",
    "multi_gpu = True\n",
    "# checkpoint\n",
    "checkpoint_generator = \"./housegan/checkpoints/generator_exp_example_D_500000.pth\"\n",
    "checkpoint_discriminator = \"./housegan/checkpoints/discriminator_exp_example_D_500000.pth\"\n",
    "# Loss function\n",
    "adversarial_loss = torch.nn.BCEWithLogitsLoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "generator = Generator()\n",
    "discriminator = Discriminator()\n",
    "generator.to(device)\n",
    "discriminator.to(device)\n",
    "adversarial_loss.to(device)\n",
    "# map_location makes loading robust when the checkpoint was saved on a different device\n",
    "generator.load_state_dict(torch.load(checkpoint_generator, map_location=device))\n",
    "discriminator.load_state_dict(torch.load(checkpoint_discriminator, map_location=device))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"load datasets...\")\n",
    "rooms_path = './housegan/dataset'#'/home/nelson/Workspace/autodesk/housegan/'\n",
    "fp_dataset_train = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=opt.target_set)\n",
    "#fp_subdataset_train = torch.utils.data.Subset(fp_dataset_train, range(0, len(fp_dataset_train)//100))\n",
    "fp_dataloader_train = torch.utils.data.DataLoader(fp_dataset_train, \n",
    "                                        batch_size=64,#opt.batch_size, \n",
    "                                        shuffle=True,\n",
    "                                        num_workers=opt.n_cpu,\n",
    "                                        collate_fn=floorplan_collate_fn,\n",
    "                                        pin_memory = True)\n",
    "fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=opt.target_set, split='eval')\n",
    "fp_dataloader_val = torch.utils.data.DataLoader(fp_dataset_test, \n",
    "                                        batch_size=64,#opt.batch_size, \n",
    "                                        shuffle=True,\n",
    "                                        num_workers=opt.n_cpu,\n",
    "                                        collate_fn=floorplan_collate_fn,\n",
    "                                        pin_memory = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Saliency Map\n",
    "[1] Understanding neural networks through deep visualization  \n",
    "[2] Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import saliency_map\n",
    "reload(saliency_map)\n",
    "from saliency_map import draw_image_with_grad\n",
    "\n",
    "import housegan_utils\n",
    "reload(housegan_utils)\n",
    "from housegan_utils import (\n",
    "    batch_split_with_grad,\n",
    "    multichannel_layout_to_rgb_layout,\n",
    "    show_saliency\n",
    ")\n",
    "\n",
    "# ./housegan/\n",
    "from utils import ColorPalette, ROOM_CLASS\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure data loader\n",
    "rooms_path = './housegan/dataset'#'/home/nelson/Workspace/autodesk/housegan/'\n",
    "fp_dataset_train = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=opt.target_set)\n",
    "fp_loader = torch.utils.data.DataLoader(fp_dataset_train, \n",
    "                                        batch_size=opt.batch_size, \n",
    "                                        shuffle=False,\n",
    "                                        num_workers=opt.n_cpu,\n",
    "                                        collate_fn=floorplan_collate_fn)\n",
    "\n",
    "fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=opt.target_set, split='eval')\n",
    "fp_loader_test = torch.utils.data.DataLoader(fp_dataset_test, \n",
    "                                        batch_size=64, \n",
    "                                        shuffle=False,\n",
    "                                        num_workers=opt.n_cpu,\n",
    "                                        collate_fn=floorplan_collate_fn)\n",
    "\n",
    "# Optimizers\n",
    "optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.g_lr, betas=(opt.b1, opt.b2)) \n",
    "optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.d_lr, betas=(opt.b1, opt.b2))\n",
    "Tensor = torch.cuda.FloatTensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run one discriminator pass on a single batch so input gradients\n",
    "# (used for the saliency maps below) get populated.\n",
    "for i, batch in enumerate(fp_loader):\n",
    "    # Unpack batch\n",
    "    (\n",
    "        mks, # GT layout masks, one channel per room (node), (n_room, img_size, img_size), e.g. 280x32x32\n",
    "        nds, # GT one-hot room-type vector per room, (n_room, 10), e.g. 280x10\n",
    "        eds, # GT room adjacency edges, (n_edge, 3), e.g. 1229x3\n",
    "        nd_to_sample, # GT index of the batch sample each room belongs to\n",
    "        ed_to_sample # GT index of the batch sample each edge belongs to\n",
    "    ) = batch\n",
    "\n",
    "    indices = nd_to_sample, ed_to_sample\n",
    "    # Adversarial ground truths\n",
    "    batch_size = torch.max(nd_to_sample) + 1\n",
    "    valid = Variable(Tensor(batch_size, 1)\\\n",
    "                        .fill_(1.0), requires_grad=False)\n",
    "    fake = Variable(Tensor(batch_size, 1)\\\n",
    "                    .fill_(0.0), requires_grad=False)\n",
    "\n",
    "    # Configure input\n",
    "    real_mks = Variable(mks.type(Tensor))\n",
    "    given_nds = Variable(nds.type(Tensor))\n",
    "    given_eds = eds\n",
    "\n",
    "    # Track input gradients for the saliency map\n",
    "    real_mks.requires_grad_()\n",
    "    given_nds.requires_grad_()\n",
    "    \n",
    "    # Set grads on\n",
    "    for p in discriminator.parameters():\n",
    "        p.requires_grad = True\n",
    "\n",
    "    # ---------------------\n",
    "    #  Train Discriminator\n",
    "    # ---------------------\n",
    "    optimizer_D.zero_grad()\n",
    "    \n",
    "    # Generate a batch of images\n",
    "    z_shape = [real_mks.shape[0], opt.latent_dim]\n",
    "    z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape))))\n",
    "\n",
    "    gen_mks = generator(z, given_nds, given_eds)\n",
    "    \n",
    "    # Real images\n",
    "    real_validity = discriminator(real_mks, given_nds, given_eds, nd_to_sample)\n",
    "        \n",
    "    # Fake images\n",
    "    fake_validity = discriminator(gen_mks.detach(), given_nds.detach(), \\\n",
    "                                        given_eds.detach(), nd_to_sample.detach())\n",
    "\n",
    "    # Measure discriminator's ability to classify real from generated samples\n",
    "    gradient_penalty = compute_gradient_penalty(discriminator, real_mks.data, \\\n",
    "                                                    gen_mks.data, given_nds.data, \\\n",
    "                                                    given_eds.data, nd_to_sample.data, \\\n",
    "                                                    None, None)\n",
    "    # Gradient-penalty term intentionally disabled; the original dangling\n",
    "    # line-continuation followed by a comment line was fragile, so the\n",
    "    # expression is folded onto a single line here (same value).\n",
    "    d_loss = -torch.mean(real_validity) + torch.mean(fake_validity)  # + lambda_gp * gradient_penalty\n",
    "    d_loss.backward()\n",
    "\n",
    "    break  # single batch only -- the generator step below is intentionally unreachable\n",
    "    # -----------------\n",
    "    #  Train Generator (dead code, kept for reference)\n",
    "    # -----------------\n",
    "    optimizer_G.zero_grad()\n",
    "    \n",
    "    # Set grads off\n",
    "    for p in discriminator.parameters():\n",
    "        p.requires_grad = False\n",
    "\n",
    "    # Generate a batch of images\n",
    "    z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape))))\n",
    "    gen_mks = generator(z, given_nds, given_eds)\n",
    "\n",
    "    # Score fake images\n",
    "    fake_validity = discriminator(gen_mks, given_nds, given_eds, nd_to_sample)\n",
    "        \n",
    "    # Update generator\n",
    "    g_loss = -torch.mean(fake_validity)\n",
    "    g_loss.backward()\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "layouts, layouts_grads, nodes_types_onehot, nodes_grads, edges = batch_split_with_grad(real_mks, given_nds, given_eds, nd_to_sample, ed_to_sample)\n",
    "image, layout_saliency = draw_image_with_grad(layouts,layouts_grads)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<matplotlib.image.AxesImage at 0x7fc5ad256ee0>"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD7CAYAAACscuKmAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAOS0lEQVR4nO3dX4hc533G8e9T2UauE2ulOhJby/XIIOKYUK+CUB1UysaKguqayDcuMUlYhMreuKDQFFeqoTQXBUEhuBelsCS2FuImFUlcCV8kEZsobSEolmMpkSIrct2xLLTWto7lpKUOtfPrxRztzq53vaOZM+eM9Hs+IM77vvPn/HS0z573zBydo4jAzK5/v1F3AWZWDYfdLAmH3SwJh90sCYfdLAmH3SyJnsIuaYeks5JekrS3rKLMrHzq9nt2SSuAnwHbgQvAc8AjEfHT8sozs7Lc0MNrtwAvRcTLAJK+BuwElgy7JJ/BY9ZnEaHFxnuZxt8OvNrWv1CMmdkA6mXPvthvj3ftuSWNA+M9rMfMStBL2C8Ad7T11wMXFz4pIiaACfA03qxOvUzjnwM2Stog6SbgU8Dhcsoys7J1vWePiLcl/SnwbWAF8GREnC6tMjMrVddfvXW1Mk/jzfquH5/Gm9k1xGE3S8JhN0uil6/erimNRqPuEuw613zjjbnOm2/WV8gSvGc3S8JhN0vCYTdLIs0xe7vmnj1znZGR2uqwa19j1662TmOuffJk5bUsx3t2syQcdrMkUk7j503dR0frqsKuN0NDdVfwnrxnN0vCYTdLwmE3S8JhN0vCYTdLwmE3S8JhN0vCYTdLwmE3S8JhN0vCYTdLwmE3S8JhN0vCYTdLYtmwS3pS0oykU21jayQdkXSuWK7ub5lm1qtO9uwHgB0LxvYCUxGxEZgq+mY2wJYNe0T8C/DzBcM7gcmiPQk8VG5ZZla2bo/Z10XENECxXFteSWbWD32/LJWkcWC83+sxs/fWbdgvSRqOiGlJw8DMUk+MiAlgAgbnls3zLv9r1oN5lyU/cKC2OjrR7TT+MDBWtMeAQ+WUY2b90slXb18FfgB8UNIFSbuB/cB2SeeA7UXfzAbYstP4iHhkiYe2lVyLmfWRIqo7jK71mH3Vqrl2+216Bvxa3zbgLl+eazebc+0ab9kcEVps3KfLmiXhsJslkeb2T43Vc6fvP/Xg3B02R++po5rBtuHxxmz7qU83Z9veVu/Wvq3u/O25qfv365vFL8l7drMkHHazJBx2syQcdrMkHHazJBx2syQcdrMkHHazJBx2syQcdrMkHHazJBx2syQcdrMkHHazJBx2syQcdrMkHHazJBx2syQcdrMkHHazJBx2syQ6uf3THZK+J+mMpNOS9hTjayQdkXSuWK5e7r3MrD6d7NnfBj4fER8C7gMelXQPsBeYioiNwFTRN7MBtWzYI2I6In5UtH8JnAFuB3YCk8XTJoGH+lSjmZXgqo7ZJTWATcAxYF1ETEPrFwKwtvTqzKw0Hd8RRtL7gG8An4uIX0iL3jtusdeNA+PdlWdmZelozy7pRlpBfzoivlkMX5I0XDw+DMws9tqImIiIzRGxuYyCzaw7nXwaL+DLwJmI+GLbQ4eBsaI9BhwqvzwzK0sn0/itwGeBn0g6UYz9JbAfOChpN3AeeLgvFZpZKZYNe0T8G7DUAfq2cssxs37xGXRmSTjsZkk47GZJOOxmSTjsZkk47GZJOOxmSTjsZkk47GZJOOxmSTjsZkk47GZJOOxmSTjsZkk47GZJOOxmSTjsZkk47GZJOOxmSTjsZkk47GZJOOxmSTjsZkk47GZJOOxmSXRyr7eVkn4o6aSk05K+UIyvkXRE0rliubr/5ZpZtzrZs/8KuD8i7gVGgB2S7gP2AlMRsRGYKvpmNqCWDXu0/HfRvbH4E8BOYLIYnwQe6keBZlaOTu/PvqK4g+sMcCQijgHrImIaoFiu7VuVZtazjsIeEe9ExAiw
Htgi6cOdrkDSuKTjko53WaOZleCqPo2PiMvAUWAHcEnSMECxnFniNRMRsTkiNvdWqpn1opNP4z8gaaho3wx8HHgROAyMFU8bAw71qUYzK8ENHTxnGJiUtILWL4eDEfGspB8AByXtBs4DD/exTjPr0bJhj4gfA5sWGX8d2NaPosysfD6DziwJh90sCYfdLAmH3SwJh90sCYfdLAmH3SwJh90sCYfdLAmH3SwJh90sCYfdLAmH3SwJh90sCYfdLAmH3SwJh90siU4uS3Xd2fV0o+4S+qbZfGO2HU+/WWMlczY83qi7hEp8/8yqtt5gbPt23rObJeGwmyWRchrfbO5p643UVUYpGo1dC0fa2icrrKQz87c9XF/bv9HWHrxt7z27WRIOu1kSDrtZEimP2ecfJ47WVEO/DM22yvjKq/yvKUcW9EdLfv86DdVdwHvqeM9e3Lb5BUnPFv01ko5IOlcsV/evTDPr1dVM4/cAZ9r6e4GpiNgITBV9MxtQHU3jJa0H/gj4G+DPiuGdzM3BJmndyvkvyi3PenHnzc2uXnfilbl247a59tAt3dXxyv82unuhlarTPfsTwGPAr9vG1kXENECxXFtuaWZWpk7uz/4gMBMRz3ezAknjko5LOt7N682sHJ1M47cCn5T0ALASuFXSV4BLkoYjYlrSMDCz2IsjYgKYAJAUJdVtZlepk/uz7wP2AUgaBf48Ij4j6W+BMWB/sTzUvzKtG0fPlPC79Xzvb7GhsaH3N7Ge9XJSzX5gu6RzwPaib2YD6qpOqomIo7Q+dSciXge2lV+SmfWDT5c1S8JhN0vCYTdLwmE3S8JhN0vCYTdLwmE3S8JhN0vCYTdLwmE3S0IR1f1HtDr/11uj0ahr1X317uuwH5htBSeqLGVJ1/N/hJm//Q+0teu7bnxEaLFx79nNknDYzZJw2M2SSHPMDu230220tYeqLaN0lxf0m7OteNdj9dC8bdxY8OgQ17bLbe1mW7u+Wzb7mN0sOYfdLIk0t39qNOZuWPNU84nZ9uh1dfuhwdS+7UdHNy14rFFxNeU6cOBAW2/u79ls1jeNX4r37GZJOOxmSTjsZkk47GZJOOxmSTjsZkk47GZJdHp/9ibwS+Ad4O2I2CxpDfBPtM5/bAJ/HBFv9KdMM+vV1ezZPxYRIxGxuejvBaYiYiMwVfTNbED1Mo3fCUwW7UngoZ6rMbO+6TTsAXxH0vOSxouxdRExDVAs1/ajQDMrR6fnxm+NiIuS1gJHJL3Y6QqKXw7jyz7RzPqqoz17RFwsljPAM8AW4JKkYYBiObPEayciYnPbsb6Z1WDZsEu6RdL7r7SBTwCngMPAWPG0MeBQv4o0s951Mo1fBzwj6crz/zEiviXpOeCgpN3AeeDh/pVpZr1aNuwR8TJw7yLjrwPb+lGUmZXPZ9CZJeGwmyXhsJsl4bCbJeGwmyXhsJsl4bCbJeGwmyXhsJsl4bCbJeGwmyXhsJsl4bCbJeGwmyXhsJsl4bCbJeGwmyXhsJsl4bCbJeGwmyXhsJsl4bCbJeGwmyXhsJsloYiobmVSdStboNFo1LVqu47dfffds+0TJ07Mtl977bUaqmmJCC023tGeXdKQpK9LelHSGUkflbRG0hFJ54rl6nJLNrMydTqN/zvgWxFxN61bQZ0B9gJTEbERmCr6Zjaglp3GS7oVOAncFW1PlnQWGI2I6eKWzUcj4oPLvFdt0/hVrJptN2jMtocYqr6YZI6tPDbbHhoamvfYypUrK66mXG+99dZs+/Lly4uOV62XafxdwH8CT0l6QdKXils3r4uI6eLNp4G1pVVrZqXrJOw3AB8B/iEiNgH/w1VM2SWNSzou6XiXNZpZCToJ+wXgQkRcmYt9nVb4LxXTd4rlzGIvjoiJiNgcEZvLKNjMutPRV2+S/hX4k4g4K+mvgVuKh16PiP2S9gJrIuKxZd6ntmN2syyWOmbvNOwjwJeAm4CXgV20ZgUHgd8BzgMPR8TPl3kfh92sz3oK
e1kcdrP+6+mkGjO79jnsZkk47GZJOOxmSTjsZkk47GZJOOxmSdxQ8fr+C3gFuK1o1811zOc65huEOq62hjuXeqDSk2pmVyodH4Rz5V2H6xj0OsqswdN4syQcdrMk6gr7RE3rXch1zOc65huEOkqroZZjdjOrnqfxZklUGnZJOySdlfRSccGLqtb7pKQZSafaxiq/FLakOyR9r7gc92lJe+qoRdJKST+UdLKo4wt11NFWz4ri+obP1lWHpKakn0g6ceUSajXV0bfLtlcWdkkrgL8H/hC4B3hE0j0Vrf4AsGPBWB2Xwn4b+HxEfAi4D3i02AZV1/Ir4P6IuBcYAXZIuq+GOq7YQ+vy5FfUVcfHImKk7auuOuro32XbI6KSP8BHgW+39fcB+ypcfwM41dY/CwwX7WHgbFW1tNVwCNheZy3AbwI/An6vjjqA9cUP8P3As3X92wBN4LYFY5XWAdwK/AfFZ2ll11HlNP524NW2/oVirC61XgpbUgPYBByro5Zi6nyC1oVCj0TrgqJ1bJMngMeAX7eN1VFHAN+R9Lyk8Zrq6Otl26sM+2KXykn5VYCk9wHfAD4XEb+oo4aIeCciRmjtWbdI+nDVNUh6EJiJiOerXvcitkbER2gdZj4q6Q9qqKGny7Yvp8qwXwDuaOuvBy5WuP6FOroUdtkk3Ugr6E9HxDfrrAUgIi4DR2l9plF1HVuBT0pqAl8D7pf0lRrqICIuFssZ4BlgSw119HTZ9uVUGfbngI2SNki6CfgUcLjC9S90GBgr2mO0jp/7SpKALwNnIuKLddUi6QOShor2zcDHgRerriMi9kXE+oho0Pp5+G5EfKbqOiTdIun9V9rAJ4BTVdcREa8Br0q6chu1bcBPS6uj3x98LPig4QHgZ8C/A49XuN6vAtPA/9H67bkb+C1aHwydK5ZrKqjj92kduvwYOFH8eaDqWoDfBV4o6jgF/FUxXvk2aatplLkP6KreHnfRup/hSeD0lZ/Nmn5GRoDjxb/NPwOry6rDZ9CZJeEz6MyScNjNknDYzZJw2M2ScNjNknDYzZJw2M2ScNjNkvh/N4G4U39ntPoAAAAASUVORK5CYII=",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "import housegan_utils\n",
    "reload(housegan_utils)\n",
    "from housegan_utils import draw_graph_with_saliency, show_saliency, multichannel_layout_to_rgb_layout\n",
    "# multichannel layout to rgb layout\n",
    "layouts_imgs = multichannel_layout_to_rgb_layout(layouts,nodes_types_onehot)\n",
    "plt.imshow(layouts_imgs[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import housegan_utils\n",
    "reload(housegan_utils)\n",
    "from housegan_utils import draw_graph_with_saliency, show_saliency, multichannel_layout_to_rgb_layout\n",
    "graph_drawings = draw_graph_with_saliency(layouts, nodes_types_onehot, nodes_grads, edges,figsize=6)\n",
    "plt.imshow(graph_drawings[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "32"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(layouts_imgs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "nrow=16\n",
    "dir_save=\"results_HouseGAN/saliency_map\"\n",
    "os.makedirs(dir_save,exist_ok=True)\n",
    "iterator = iter(fp_loader)\n",
    "for batch_id in range(8):\n",
    "    # `iterator.next()` is not a Python 3 iterator method (and the alias was\n",
    "    # removed from PyTorch DataLoader iterators) -- use the next() builtin.\n",
    "    batch = next(iterator)\n",
    "    # Unpack batch\n",
    "    (\n",
    "        mks, # GT layout masks, one channel per room (node), (n_room, img_size, img_size), e.g. 280x32x32\n",
    "        nds, # GT one-hot room-type vector per room, (n_room, 10), e.g. 280x10\n",
    "        eds, # GT room adjacency edges, (n_edge, 3), e.g. 1229x3\n",
    "        nd_to_sample, # GT index of the batch sample each room belongs to\n",
    "        ed_to_sample # GT index of the batch sample each edge belongs to\n",
    "    ) = batch\n",
    "\n",
    "    indices = nd_to_sample, ed_to_sample\n",
    "    # Adversarial ground truths\n",
    "    batch_size = torch.max(nd_to_sample) + 1\n",
    "    valid = Variable(Tensor(batch_size, 1)\\\n",
    "                        .fill_(1.0), requires_grad=False)\n",
    "    fake = Variable(Tensor(batch_size, 1)\\\n",
    "                    .fill_(0.0), requires_grad=False)\n",
    "\n",
    "    # Configure input\n",
    "    real_mks = Variable(mks.type(Tensor))\n",
    "    given_nds = Variable(nds.type(Tensor))\n",
    "    given_eds = eds\n",
    "\n",
    "    # Track input gradients for the saliency map\n",
    "    real_mks.requires_grad_()\n",
    "    given_nds.requires_grad_()\n",
    "    \n",
    "    # Set grads on\n",
    "    for p in discriminator.parameters():\n",
    "        p.requires_grad = True\n",
    "\n",
    "    # ---------------------\n",
    "    #  Train Discriminator\n",
    "    # ---------------------\n",
    "    optimizer_D.zero_grad()\n",
    "    \n",
    "    # Generate a batch of images\n",
    "    z_shape = [real_mks.shape[0], opt.latent_dim]\n",
    "    z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape))))\n",
    "\n",
    "    gen_mks = generator(z, given_nds, given_eds)\n",
    "    \n",
    "    # Real images\n",
    "    real_validity = discriminator(real_mks, given_nds, given_eds, nd_to_sample)\n",
    "        \n",
    "    # Fake images\n",
    "    fake_validity = discriminator(gen_mks.detach(), given_nds.detach(), \\\n",
    "                                        given_eds.detach(), nd_to_sample.detach())\n",
    "\n",
    "    # Measure discriminator's ability to classify real from generated samples\n",
    "    gradient_penalty = compute_gradient_penalty(discriminator, real_mks.data, \\\n",
    "                                                    gen_mks.data, given_nds.data, \\\n",
    "                                                    given_eds.data, nd_to_sample.data, \\\n",
    "                                                    None, None)\n",
    "    # Gradient-penalty term intentionally disabled; folded onto one line to avoid\n",
    "    # the fragile dangling line-continuation + comment construct (same value).\n",
    "    d_loss = -torch.mean(real_validity) + torch.mean(fake_validity)  # + lambda_gp * gradient_penalty\n",
    "    d_loss.backward()\n",
    "\n",
    "    layouts, layouts_grads, nodes_types_onehot, nodes_grads, edges = batch_split_with_grad(real_mks, given_nds, given_eds, nd_to_sample, ed_to_sample)\n",
    "    image, layout_saliency = draw_image_with_grad(layouts,layouts_grads)\n",
    "    layouts_imgs = multichannel_layout_to_rgb_layout(layouts,nodes_types_onehot)\n",
    "    graph_drawings = draw_graph_with_saliency(layouts, nodes_types_onehot, nodes_grads, edges,figsize=6)\n",
    "\n",
    "    # Save figures in grids of `nrow` layouts; close each to avoid memory blowup\n",
    "    for i in range(len(layouts_imgs)//nrow):\n",
    "        id_range=(i*nrow,(i+1)*nrow)\n",
    "        figure =show_saliency(layouts_imgs[id_range[0]:id_range[1]],layout_saliency[id_range[0]:id_range[1]],graph_drawings[id_range[0]:id_range[1]],nrow=nrow//2)\n",
    "        figure.savefig(os.path.join(dir_save,\"demo_{}_{}_batch_{}\".format(id_range[0],id_range[1],batch_id)))\n",
    "        plt.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from utils import ID_COLOR,CLASS_ROM\n",
    "from tqdm.notebook import tqdm\n",
    "# NOTE(review): fp_subdataset_dataloader_train is created here but the loop\n",
    "# below iterates fp_loader -- confirm which loader was intended.\n",
    "fp_subdataset_train = torch.utils.data.Subset(fp_dataset_train, range(0, len(fp_dataset_train)//10))\n",
    "fp_subdataset_dataloader_train = torch.utils.data.DataLoader(fp_subdataset_train, \n",
    "                                        batch_size=opt.batch_size, \n",
    "                                        shuffle=True,\n",
    "                                        num_workers=opt.n_cpu,\n",
    "                                        collate_fn=floorplan_collate_fn,\n",
    "                                        pin_memory = True)\n",
    "saliency_layouts=[]\n",
    "saliency_nodes_all={name:[] for name in CLASS_ROM.values()}\n",
    "for i, batch in enumerate(tqdm(fp_loader)):\n",
    "    # Unpack batch\n",
    "    (\n",
    "        mks, # GT layout masks, one channel per room (node), (n_room, img_size, img_size)\n",
    "        nds, # GT one-hot room-type vector per room, (n_room, 10)\n",
    "        eds, # GT room adjacency edges, (n_edge, 3)\n",
    "        nd_to_sample, # GT index of the batch sample each room belongs to\n",
    "        ed_to_sample # GT index of the batch sample each edge belongs to\n",
    "    ) = batch\n",
    "\n",
    "    indices = nd_to_sample, ed_to_sample\n",
    "    # Adversarial ground truths\n",
    "    batch_size = torch.max(nd_to_sample) + 1\n",
    "    valid = Variable(Tensor(batch_size, 1)\\\n",
    "                        .fill_(1.0), requires_grad=False)\n",
    "    fake = Variable(Tensor(batch_size, 1)\\\n",
    "                    .fill_(0.0), requires_grad=False)\n",
    "\n",
    "    # Configure input\n",
    "    real_mks = Variable(mks.type(Tensor))\n",
    "    given_nds = Variable(nds.type(Tensor))\n",
    "    given_eds = eds\n",
    "\n",
    "    # Track input gradients for the saliency map\n",
    "    real_mks.requires_grad_()\n",
    "    given_nds.requires_grad_()\n",
    "    \n",
    "    # Set grads on\n",
    "    for p in discriminator.parameters():\n",
    "        p.requires_grad = True\n",
    "\n",
    "    # ---------------------\n",
    "    #  Train Discriminator\n",
    "    # ---------------------\n",
    "    optimizer_D.zero_grad()\n",
    "    \n",
    "    # Generate a batch of images\n",
    "    z_shape = [real_mks.shape[0], opt.latent_dim]\n",
    "    z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape))))\n",
    "\n",
    "    gen_mks = generator(z, given_nds, given_eds)\n",
    "    \n",
    "    # Real images\n",
    "    real_validity = discriminator(real_mks, given_nds, given_eds, nd_to_sample)\n",
    "        \n",
    "    # Fake images\n",
    "    fake_validity = discriminator(gen_mks.detach(), given_nds.detach(), \\\n",
    "                                        given_eds.detach(), nd_to_sample.detach())\n",
    "\n",
    "    # Measure discriminator's ability to classify real from generated samples\n",
    "    gradient_penalty = compute_gradient_penalty(discriminator, real_mks.data, \\\n",
    "                                                    gen_mks.data, given_nds.data, \\\n",
    "                                                    given_eds.data, nd_to_sample.data, \\\n",
    "                                                    None, None)\n",
    "    # Gradient-penalty term intentionally disabled; folded onto one line to avoid\n",
    "    # the fragile dangling line-continuation + comment construct (same value).\n",
    "    d_loss = -torch.mean(real_validity) + torch.mean(fake_validity)  # + lambda_gp * gradient_penalty\n",
    "    d_loss.backward()\n",
    "\n",
    "    layouts, layouts_grads, nodes_types_onehot, nodes_grads, edges = batch_split_with_grad(real_mks, given_nds, given_eds, nd_to_sample, ed_to_sample)\n",
    "    _, layout_saliency = draw_image_with_grad(layouts,layouts_grads)\n",
    "    saliency_layouts.append(layout_saliency)\n",
    "\n",
    "    # Bucket each node's max input-gradient magnitude by its room type\n",
    "    for layout_id in range(len(nodes_types_onehot)):\n",
    "        nodes_types = nodes_types_onehot[layout_id].max(dim=-1).indices\n",
    "        nodes_grads_max = nodes_grads[layout_id].max(dim=-1).values\n",
    "        for k,v in zip(nodes_types,nodes_grads_max):\n",
    "            # presumably CLASS_ROM maps 1-based room ids to names -- verify in ./housegan/utils\n",
    "            roomname= CLASS_ROM[k.item()+1]\n",
    "            saliency_nodes_all[roomname].append(v.item())\n",
    "    \n",
    "saliency_layouts = np.concatenate(saliency_layouts,axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Average per-sample saliency maps over the dataset, then min-max normalize\n",
    "# to [0, 1] (epsilon guards against a constant map) before displaying.\n",
    "saliency_layouts_mean = saliency_layouts.mean(axis=0)\n",
    "saliency_layouts_mean -= saliency_layouts_mean.min(axis=(0,1))\n",
    "saliency_layouts_mean /= (saliency_layouts_mean.max(axis=(0,1))+1e-8)\n",
    "plt.figure(figsize=(6,6))\n",
    "plt.title(\"Average saliency map for stacked raster rooms\",fontsize=20)\n",
    "plt.imshow(saliency_layouts_mean,alpha=0.5)\n",
    "plt.axis(\"off\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import saliency_map\n",
    "reload(saliency_map)\n",
    "from saliency_map import draw_average_saliency,draw_saliency_distribution\n",
    "def map_key_to_color(key):\n",
    "    return ID_COLOR[ROOM_CLASS[key]]\n",
    "\n",
    "draw_average_saliency(saliency_nodes_all,map_key_to_color)\n",
    "draw_saliency_distribution(saliency_nodes_all,map_key_to_color,figsize=(10,8))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.hist(saliency_nodes_all['dining_room'],bins=20)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Linear classifier probes\n",
    "[1] Understanding intermediate layers using linear classifier probes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import pickle\n",
    "from housegan_utils import DISCRIMINATOR_MIDFEATURE_SIZE\n",
    "# NOTE: pickle.load can execute arbitrary code -- only load files produced by\n",
    "# this project's own probe-training run.\n",
    "with open(\"housegan_probe_training_result/probe_loss_and_error_last.pkl\",\"rb\") as f:\n",
    "    loss_epochs, error_epochs, probe_stop = pickle.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "DISCRIMINATOR_MIDFEATURE_SIZE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "for feature_name,stop_epochs in probe_stop.items():\n",
    "    print(feature_name,stop_epochs)\n",
    "    error_epochs[feature_name] = error_epochs[feature_name][:stop_epochs+1]\n",
    "    loss_epochs[feature_name] = loss_epochs[feature_name][:stop_epochs+1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.figure(figsize=(15,8))\n",
    "y={}\n",
    "for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE:\n",
    "    y[feature_name] = loss_epochs[feature_name][-1]\n",
    "y[\"features_cmp_2\"]+=0.006\n",
    "y[\"features_cmp_1\"]+=-0.006\n",
    "y[\"features_encoder\"]+=-0.003\n",
    "for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE:\n",
    "    plt.plot(loss_epochs[feature_name])\n",
    "    plt.text(len(loss_epochs[feature_name]),y[feature_name],\"{}: {:.4f}\".format(feature_name.split(\"features_\")[-1],loss_epochs[feature_name][-1]),fontdict={'size':'13'})\n",
    "plt.legend(DISCRIMINATOR_MIDFEATURE_SIZE.keys())\n",
    "plt.title(\"BCE losses of Linear Classifier Probes during training\")\n",
    "plt.xlabel(\"epochs\")\n",
    "plt.ylabel(\"BCE loss\")\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.figure(figsize=(15,8))\n",
    "y={}\n",
    "for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE:\n",
    "    y[feature_name] = error_epochs[feature_name][-1]\n",
    "\n",
    "for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE:\n",
    "    plt.plot(error_epochs[feature_name])\n",
    "    plt.text(len(error_epochs[feature_name]),y[feature_name],\"{}: {:.4f}\".format(feature_name.split(\"features_\")[-1],error_epochs[feature_name][-1]),fontdict={'size':'13'})\n",
    "plt.legend(DISCRIMINATOR_MIDFEATURE_SIZE.keys())\n",
    "plt.title(\"Errors of Linear Classifier Probes during training\")\n",
    "plt.xlabel(\"epoch\")\n",
    "plt.ylabel(\"Error rate\")\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm.notebook import tqdm\n",
    "discriminator.eval()\n",
    "Tensor=torch.cuda.FloatTensor\n",
    "\n",
    "# Sweep decision thresholds and measure the discriminator's error rate on the\n",
    "# validation set (a fake scored above t, or a real scored at/below t, is an error).\n",
    "thresholds=np.linspace(30,40,5)\n",
    "rate_errors=[]\n",
    "# Pure inference: disable autograd so no graphs are built (saves GPU memory)\n",
    "with torch.no_grad():\n",
    "    for t in thresholds:\n",
    "        n_errors = 0\n",
    "        for batch in tqdm(fp_dataloader_val):\n",
    "            # Unpack batch\n",
    "            mks, nds, eds, nd_to_sample, ed_to_sample = batch\n",
    "            # Configure input\n",
    "            real_mks = Variable(mks.type(Tensor)).to(device)\n",
    "            given_nds = Variable(nds.type(Tensor)).to(device)\n",
    "            given_eds = eds\n",
    "            # Generate a batch of images\n",
    "            z_shape = [real_mks.shape[0], opt.latent_dim]\n",
    "            z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape)))).to(device)\n",
    "            gen_mks = generator(z, given_nds, given_eds)\n",
    "\n",
    "            real_validity = discriminator(real_mks, given_nds, given_eds, nd_to_sample)\n",
    "            fake_validity = discriminator(gen_mks.detach(), given_nds.detach(), \\\n",
    "                                              given_eds.detach(), nd_to_sample.detach())\n",
    "            n_errors += (fake_validity>t).sum()\n",
    "            n_errors += (real_validity<=t).sum()\n",
    "        # two predictions per sample (one real + one fake), hence the /2.\n",
    "        rate_error = n_errors/2./len(fp_dataloader_val.dataset)\n",
    "        rate_errors.append(rate_error.item())\n",
    "print(thresholds)\n",
    "print(rate_errors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x=[]\n",
    "y=[]\n",
    "for i,feature_name in enumerate(DISCRIMINATOR_MIDFEATURE_SIZE):\n",
    "    x.append(i)\n",
    "    y.append(error_epochs[feature_name][-1])\n",
    "plt.figure(figsize=(8,5))\n",
    "plt.plot(x,y,color=\"magenta\",linewidth=2)\n",
    "plt.plot(x,[min(rate_errors)]*len(x),\"--\",color=\"cyan\",)\n",
    "plt.ylim((0.0,1.))\n",
    "plt.ylabel(\"valid prediction error\")\n",
    "plt.xticks(x,[feature_name.replace(\"features_\",\"\") for feature_name in list(DISCRIMINATOR_MIDFEATURE_SIZE.keys())],rotation=60)\n",
    "plt.legend([\"probes\",\"whole discriminator\\n(model top layer)\"])\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "对于discriminator而言，各层的特征的线性可分性展示了对于平面图好坏的理解能力：\n",
    "- 随着层数深入，这种能力首先是逐渐递增的，说明神经网络学习到了部分建筑学知识\n",
    "- 而建筑问题的复杂性使得过小的特征不足以充分表达，因而在降采样后，线性可分性有所减弱"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"load datasets...\")\n",
    "# Dataset root; the commented-out path is the original author's absolute path.\n",
    "rooms_path = './housegan/dataset'#'/home/nelson/Workspace/autodesk/housegan/'\n",
    "fp_dataset_train = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=opt.target_set)\n",
    "# Only the first 1% of the training set is used, to keep probe training fast.\n",
    "fp_subdataset_train = torch.utils.data.Subset(fp_dataset_train, range(0, len(fp_dataset_train)//100))\n",
    "fp_dataloader_train = torch.utils.data.DataLoader(fp_subdataset_train, \n",
    "                                        batch_size=64,#opt.batch_size, \n",
    "                                        shuffle=True,\n",
    "                                        num_workers=opt.n_cpu,\n",
    "                                        collate_fn=floorplan_collate_fn,\n",
    "                                        pin_memory = True)\n",
    "# Evaluation split (full set, no subsetting) for validating the probes.\n",
    "fp_dataset_test = FloorplanGraphDataset(rooms_path, transforms.Normalize(mean=[0.5], std=[0.5]), target_set=opt.target_set, split='eval')\n",
    "fp_dataloader_val = torch.utils.data.DataLoader(fp_dataset_test, \n",
    "                                        batch_size=64,#opt.batch_size, \n",
    "                                        shuffle=True,\n",
    "                                        num_workers=opt.n_cpu,\n",
    "                                        collate_fn=floorplan_collate_fn,\n",
    "                                        pin_memory = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from housegan_utils import DISCRIMINATOR_MIDFEATURE_SIZE, train_probes, extract_midlayers_features\n",
    "from linear_classifier_probe import LinearClassifierProbe, train_probe_and_get_loss\n",
    "from tqdm.notebook import tqdm\n",
    "# Freeze the GAN: probes are trained on fixed discriminator features.\n",
    "generator.eval()\n",
    "discriminator.eval()\n",
    "\n",
    "# Initialize one LinearClassifierProbe per feature layer, each with its own optimizer.\n",
    "print(\"init probes...\")\n",
    "probes = dict()\n",
    "probe_optimizers = dict()\n",
    "learning_rate =1e-5\n",
    "# NOTE(review): n_epochs is defined here but not used in this cell -- presumably\n",
    "# consumed by a later training cell; verify it is still needed.\n",
    "n_epochs=10000\n",
    "for feature_name, feature_length in DISCRIMINATOR_MIDFEATURE_SIZE.items():\n",
    "    # 2-way probe: real vs. generated floorplan features\n",
    "    probes[feature_name] = LinearClassifierProbe(n_in_features=feature_length, n_out_features=2, softmax_dim=-1)\n",
    "    probes[feature_name].to(device)\n",
    "    probes[feature_name].train()\n",
    "    probe_optimizers[feature_name] = torch.optim.Adam(probes[feature_name].parameters(), lr=learning_rate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _batch_midlayer_features(batch):\n",
    "    \"\"\"Run one dataloader batch through the frozen generator/discriminator\n",
    "    and return mid-layer features as [real_features, generated_features].\"\"\"\n",
    "    Tensor = torch.cuda.FloatTensor\n",
    "    mks, nds, eds, nd_to_sample, ed_to_sample = batch\n",
    "    # Configure input\n",
    "    real_mks = Variable(mks.type(Tensor)).to(device)\n",
    "    given_nds = Variable(nds.type(Tensor)).to(device)\n",
    "    given_eds = eds\n",
    "    # Sample latent noise and generate fake masks for the same room graphs\n",
    "    z_shape = [real_mks.shape[0], opt.latent_dim]\n",
    "    z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape)))).to(device)\n",
    "    gen_mks = generator(z, given_nds, given_eds)\n",
    "    return [extract_midlayers_features(discriminator, real_mks, given_nds, given_eds, nd_to_sample),\n",
    "            extract_midlayers_features(discriminator, gen_mks, given_nds, given_eds, nd_to_sample)]\n",
    "\n",
    "def get_statistic(dataloader):\n",
    "    \"\"\"Compute per-feature mean and std over real+generated mid-layer features.\n",
    "\n",
    "    Two passes over `dataloader`: pass 1 accumulates a running mean, pass 2 a\n",
    "    running variance about that mean. Returns (mean, std): dicts keyed by\n",
    "    feature name, used to standardize features before probe training.\n",
    "    \"\"\"\n",
    "    mean = {feature_name: None for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}\n",
    "    std = {feature_name: None for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}\n",
    "\n",
    "    # Pass 1: running mean (each batch contributes batch_size*2 samples: real + generated)\n",
    "    num_samples = {feature_name: 0 for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}\n",
    "    for batch in tqdm(dataloader):\n",
    "        midlayers_features = _batch_midlayer_features(batch)\n",
    "        for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():\n",
    "            batch_size = midlayers_features[0][feature_name].shape[0]\n",
    "            batch_sum = midlayers_features[0][feature_name].sum(axis=0) + midlayers_features[1][feature_name].sum(axis=0)\n",
    "            if mean[feature_name] is not None:\n",
    "                total = num_samples[feature_name] + batch_size * 2\n",
    "                mean[feature_name] = batch_sum / total + mean[feature_name] * (num_samples[feature_name] / total)\n",
    "            else:\n",
    "                mean[feature_name] = batch_sum / (batch_size * 2)\n",
    "            num_samples[feature_name] += batch_size * 2\n",
    "\n",
    "    # Pass 2: running variance about the pass-1 mean.\n",
    "    # BUG FIX: this loop previously iterated the global fp_dataloader_train\n",
    "    # instead of the `dataloader` argument, silently ignoring the parameter.\n",
    "    num_samples = {feature_name: 0 for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}\n",
    "    for batch in tqdm(dataloader):\n",
    "        midlayers_features = _batch_midlayer_features(batch)\n",
    "        for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():\n",
    "            batch_size = midlayers_features[0][feature_name].shape[0]\n",
    "            sq_sum = ((midlayers_features[0][feature_name] - mean[feature_name]) ** 2).sum(axis=0) \\\n",
    "                + ((midlayers_features[1][feature_name] - mean[feature_name]) ** 2).sum(axis=0)\n",
    "            if std[feature_name] is not None:\n",
    "                total = num_samples[feature_name] + batch_size * 2\n",
    "                std[feature_name] = sq_sum / total + std[feature_name] * (num_samples[feature_name] / total)\n",
    "            else:\n",
    "                std[feature_name] = sq_sum / (batch_size * 2)\n",
    "            num_samples[feature_name] += batch_size * 2\n",
    "    for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():\n",
    "        std[feature_name] = std[feature_name] ** 0.5\n",
    "    return mean, std\n",
    "\n",
    "mean, std = get_statistic(fp_dataloader_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train each layer's linear probe on standardized mid-layer features.\n",
    "# midlayers_features[0] comes from real masks, [1] from generated masks;\n",
    "# presumably train_probe_and_get_loss derives the real/fake labels from that\n",
    "# ordering -- TODO confirm against housegan_utils/linear_classifier_probe.\n",
    "loss_batchs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()} # one loss value per batch\n",
    "Tensor = torch.cuda.FloatTensor \n",
    "\n",
    "for batch in tqdm(fp_dataloader_train):\n",
    "    # Unpack batch\n",
    "    mks, nds, eds, nd_to_sample, ed_to_sample = batch\n",
    "    # Configure input\n",
    "    real_mks = Variable(mks.type(Tensor)).to(device)\n",
    "    given_nds = Variable(nds.type(Tensor)).to(device)\n",
    "    given_eds = eds\n",
    "    # Generate a batch of images\n",
    "    z_shape = [real_mks.shape[0], opt.latent_dim]\n",
    "    z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape)))).to(device)\n",
    "    gen_mks = generator(z, given_nds, given_eds)\n",
    "    # Extract mid-layer features for real and generated masks\n",
    "    midlayers_features = [extract_midlayers_features(discriminator, real_mks, given_nds, given_eds, nd_to_sample),extract_midlayers_features(discriminator, gen_mks, given_nds, given_eds, nd_to_sample)]\n",
    "    # Standardize with the dataset statistics computed by get_statistic above\n",
    "    midlayers_features[0] = {feature_name:(extracted_features-mean[feature_name])/std[feature_name] for feature_name, extracted_features in midlayers_features[0].items()}\n",
    "    midlayers_features[1] = {feature_name:(extracted_features-mean[feature_name])/std[feature_name] for feature_name, extracted_features in midlayers_features[1].items()}\n",
    "    # One optimization step per probe on this batch\n",
    "    for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():\n",
    "        loss = train_probe_and_get_loss(midlayers_features,probes,probe_optimizers,feature_name)\n",
    "        loss_batchs[feature_name].append(loss)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show the five most recent training losses for each layer's probe\n",
    "for feature_name, losses in loss_batchs.items():\n",
    "    print(feature_name, losses[-5:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: one manual forward pass + cross-entropy on the last batch's\n",
    "# \"features_cat\" features (label 0 = real, label 1 = generated).\n",
    "CEloss = torch.nn.CrossEntropyLoss()\n",
    "# merge data\n",
    "feature_name = \"features_cat\"\n",
    "X = torch.cat([midlayers_features[0][feature_name], midlayers_features[1][feature_name]], dim=0)\n",
    "# forward propagation\n",
    "preds = probes[feature_name](X)\n",
    "# labels: zeros for real samples, ones for generated samples.\n",
    "# BUG FIX: use preds.device inline instead of rebinding the global `device`\n",
    "# variable that the earlier setup cell defines and later cells rely on.\n",
    "labels = torch.cat([torch.zeros(midlayers_features[0][feature_name].shape[0], dtype=torch.long), torch.ones(midlayers_features[1][feature_name].shape[0], dtype=torch.long)], 0).to(preds.device)\n",
    "# loss\n",
    "loss = CEloss(preds, labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Visualize each concatenated \"features_cat\" vector as a 16x8 patch.\n",
    "# Imported here so the cell also works on a fresh Restart & Run All\n",
    "# (matplotlib/make_grid are otherwise imported only in a later cell).\n",
    "from torchvision.utils import make_grid\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "print(X.shape)\n",
    "plt.figure(figsize=(30, 30))\n",
    "# Use X.shape[0] instead of the hard-coded 128 so a different batch size also\n",
    "# works (each feature vector of length 128 is displayed as 16*8 pixels).\n",
    "plt.imshow(make_grid(X.reshape(X.shape[0], 16, 8).unsqueeze(1), nrow=16, padding=10).cpu().numpy().transpose(1, 2, 0))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect one standardized feature vector (rich tensor display)\n",
    "X[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torchvision.utils import make_grid\n",
    "import matplotlib.pyplot as plt\n",
    "# Visualize channel 1 of the decoder feature maps for the real samples.\n",
    "feature_name = \"features_decoder\"\n",
    "# Reshape flattened features back to (batch, 8, 32, 32); -1 infers the batch\n",
    "# size instead of hard-coding 64, so a smaller final dataloader batch also works.\n",
    "x = midlayers_features[0][feature_name].view(-1, 8, 32, 32)[:, 1, :, :]\n",
    "plt.figure(figsize=(16, 16))\n",
    "plt.imshow(make_grid(x.unsqueeze(1), nrow=8).cpu().numpy().transpose(1, 2, 0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Display the probed layer names and their flattened feature lengths\n",
    "DISCRIMINATOR_MIDFEATURE_SIZE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "1563a55f53cc4feb314d63612cb49e65678bb08110893b776c6c12b347576600"
  },
  "kernelspec": {
   "display_name": "Python 3.8.12 64-bit ('housegan': conda)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.12"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
