{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Adapted from the VQ-VAE training script in [ritheshkumar95/pytorch-vqvae](https://github.com/ritheshkumar95/pytorch-vqvae/tree/master)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torchvision import transforms, datasets\n",
    "from torchvision.utils import save_image, make_grid\n",
    "\n",
    "from modules import VectorQuantizedVAE, to_scalar\n",
    "from datasets import MiniImagenet\n",
    "\n",
    "from tensorboardX import SummaryWriter\n",
    "\n",
    "def train(data_loader, model, optimizer, args, writer):\n",
    "    \"\"\"Run one epoch of VQ-VAE training over `data_loader`.\n",
    "\n",
    "    Args:\n",
    "        data_loader: yields (images, labels); labels are ignored.\n",
    "        model: returns (x_tilde, z_e_x, z_q_x) for a batch of images.\n",
    "        optimizer: optimizer over the model's parameters.\n",
    "        args: namespace providing `device`, `beta` and a mutable global\n",
    "            step counter `steps` (incremented here once per batch).\n",
    "        writer: tensorboardX SummaryWriter for scalar logs.\n",
    "    \"\"\"\n",
    "    for images, _ in data_loader:\n",
    "        images = images.to(args.device)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        x_tilde, z_e_x, z_q_x = model(images)\n",
    "\n",
    "        # Reconstruction loss\n",
    "        loss_recons = F.mse_loss(x_tilde, images)\n",
    "        # Vector quantization objective (detach stops gradients into the\n",
    "        # encoder output; this term moves the codebook toward the encodings)\n",
    "        loss_vq = F.mse_loss(z_q_x, z_e_x.detach())\n",
    "        # Commitment objective (moves encodings toward their codewords)\n",
    "        loss_commit = F.mse_loss(z_e_x, z_q_x.detach())\n",
    "\n",
    "        # beta weights the commitment term (VQ-VAE paper, Eq. 3).\n",
    "        loss = loss_recons + loss_vq + args.beta * loss_commit\n",
    "        loss.backward()\n",
    "\n",
    "        # Logs\n",
    "        writer.add_scalar('loss/train/reconstruction', loss_recons.item(), args.steps)\n",
    "        writer.add_scalar('loss/train/quantization', loss_vq.item(), args.steps)\n",
    "\n",
    "        optimizer.step()\n",
    "        args.steps += 1\n",
    "\n",
    "def test(data_loader, model, args, writer):\n",
    "    \"\"\"Evaluate the model on `data_loader` (no gradients).\n",
    "\n",
    "    Logs the averaged reconstruction and quantization losses to\n",
    "    Tensorboard at the current `args.steps`, and returns them as a\n",
    "    (reconstruction_loss, quantization_loss) pair of floats.\n",
    "    \"\"\"\n",
    "    with torch.no_grad():\n",
    "        loss_recons, loss_vq = 0., 0.\n",
    "        for images, _ in data_loader:\n",
    "            images = images.to(args.device)\n",
    "            x_tilde, z_e_x, z_q_x = model(images)\n",
    "            loss_recons += F.mse_loss(x_tilde, images)\n",
    "            loss_vq += F.mse_loss(z_q_x, z_e_x)\n",
    "\n",
    "        # Mean of per-batch means — exact only when every batch has the\n",
    "        # same size (the last batch may be smaller unless drop_last=True).\n",
    "        loss_recons /= len(data_loader)\n",
    "        loss_vq /= len(data_loader)\n",
    "\n",
    "    # Logs\n",
    "    writer.add_scalar('loss/test/reconstruction', loss_recons.item(), args.steps)\n",
    "    writer.add_scalar('loss/test/quantization', loss_vq.item(), args.steps)\n",
    "\n",
    "    return loss_recons.item(), loss_vq.item()\n",
    "\n",
    "def generate_samples(images, model, args):\n",
    "    \"\"\"Return the model's reconstructions of `images`, without tracking\n",
    "    gradients. The result stays on `args.device`.\n",
    "    \"\"\"\n",
    "    with torch.no_grad():\n",
    "        batch = images.to(args.device)\n",
    "        reconstructions, _, _ = model(batch)\n",
    "    return reconstructions\n",
    "\n",
    "def main(args):\n",
    "    \"\"\"Train a VQ-VAE on the selected dataset and log to Tensorboard.\n",
    "\n",
    "    Expects `args` to provide: dataset, data_folder, batch_size,\n",
    "    num_workers, hidden_size, k, lr, num_epochs, device, output_folder\n",
    "    and a mutable `steps` counter (used by train()).\n",
    "    \"\"\"\n",
    "    writer = SummaryWriter('./logs/{0}'.format(args.output_folder))\n",
    "    save_filename = './models/{0}'.format(args.output_folder)\n",
    "\n",
    "    if args.dataset in ['mnist', 'fashion-mnist', 'cifar10']:\n",
    "        num_channels = 1 if args.dataset in ('mnist', 'fashion-mnist') else 3\n",
    "        # Normalization statistics must match the image channel count:\n",
    "        # 3-channel stats on 1-channel MNIST tensors raise a broadcast error.\n",
    "        transform = transforms.Compose([\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.5,) * num_channels, (0.5,) * num_channels)\n",
    "        ])\n",
    "        # Define the train & test datasets\n",
    "        if args.dataset == 'mnist':\n",
    "            train_dataset = datasets.MNIST(args.data_folder, train=True,\n",
    "                download=True, transform=transform)\n",
    "            test_dataset = datasets.MNIST(args.data_folder, train=False,\n",
    "                transform=transform)\n",
    "        elif args.dataset == 'fashion-mnist':\n",
    "            train_dataset = datasets.FashionMNIST(args.data_folder,\n",
    "                train=True, download=True, transform=transform)\n",
    "            test_dataset = datasets.FashionMNIST(args.data_folder,\n",
    "                train=False, transform=transform)\n",
    "        else:  # cifar10\n",
    "            train_dataset = datasets.CIFAR10(args.data_folder,\n",
    "                train=True, download=True, transform=transform)\n",
    "            test_dataset = datasets.CIFAR10(args.data_folder,\n",
    "                train=False, transform=transform)\n",
    "        # These datasets ship no dedicated validation split.\n",
    "        valid_dataset = test_dataset\n",
    "    elif args.dataset == 'miniimagenet':\n",
    "        transform = transforms.Compose([\n",
    "            transforms.RandomResizedCrop(128),\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n",
    "        ])\n",
    "        # Define the train, valid & test datasets\n",
    "        train_dataset = MiniImagenet(args.data_folder, train=True,\n",
    "            download=True, transform=transform)\n",
    "        valid_dataset = MiniImagenet(args.data_folder, valid=True,\n",
    "            download=True, transform=transform)\n",
    "        test_dataset = MiniImagenet(args.data_folder, test=True,\n",
    "            download=True, transform=transform)\n",
    "        num_channels = 3\n",
    "    else:\n",
    "        # Fail fast instead of hitting a NameError on train_dataset below.\n",
    "        raise ValueError('Unknown dataset: {0}'.format(args.dataset))\n",
    "\n",
    "    # Define the data loaders; the training data is shuffled every epoch\n",
    "    # (the previous shuffle=False fed batches in a fixed order).\n",
    "    train_loader = torch.utils.data.DataLoader(train_dataset,\n",
    "        batch_size=args.batch_size, shuffle=True,\n",
    "        num_workers=args.num_workers, pin_memory=True)\n",
    "    valid_loader = torch.utils.data.DataLoader(valid_dataset,\n",
    "        batch_size=args.batch_size, shuffle=False, drop_last=True,\n",
    "        num_workers=args.num_workers, pin_memory=True)\n",
    "    test_loader = torch.utils.data.DataLoader(test_dataset,\n",
    "        batch_size=16, shuffle=True)\n",
    "\n",
    "    # Fixed images for Tensorboard\n",
    "    # NOTE(review): make_grid's `range` kwarg was renamed `value_range` in\n",
    "    # newer torchvision releases — update these calls if torchvision is upgraded.\n",
    "    fixed_images, _ = next(iter(test_loader))\n",
    "    fixed_grid = make_grid(fixed_images, nrow=8, range=(-1, 1), normalize=True)\n",
    "    writer.add_image('original', fixed_grid, 0)\n",
    "\n",
    "    model = VectorQuantizedVAE(num_channels, args.hidden_size, args.k).to(args.device)\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n",
    "\n",
    "    # Baseline reconstructions from the untrained model (step 0).\n",
    "    reconstruction = generate_samples(fixed_images, model, args)\n",
    "    grid = make_grid(reconstruction.cpu(), nrow=8, range=(-1, 1), normalize=True)\n",
    "    writer.add_image('reconstruction', grid, 0)\n",
    "\n",
    "    best_loss = -1.\n",
    "    for epoch in range(args.num_epochs):\n",
    "        train(train_loader, model, optimizer, args, writer)\n",
    "        loss, _ = test(valid_loader, model, args, writer)\n",
    "\n",
    "        reconstruction = generate_samples(fixed_images, model, args)\n",
    "        grid = make_grid(reconstruction.cpu(), nrow=8, range=(-1, 1), normalize=True)\n",
    "        writer.add_image('reconstruction', grid, epoch + 1)\n",
    "\n",
    "        # Checkpointing: 'best.pt' tracks the lowest validation\n",
    "        # reconstruction loss (epoch 0 always initializes best_loss);\n",
    "        # a per-epoch snapshot is saved unconditionally.\n",
    "        if (epoch == 0) or (loss < best_loss):\n",
    "            best_loss = loss\n",
    "            with open('{0}/best.pt'.format(save_filename), 'wb') as f:\n",
    "                torch.save(model.state_dict(), f)\n",
    "        with open('{0}/model_{1}.pt'.format(save_filename, epoch + 1), 'wb') as f:\n",
    "            torch.save(model.state_dict(), f)\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    import argparse\n",
    "    import os\n",
    "    import multiprocessing as mp\n",
    "\n",
    "    parser = argparse.ArgumentParser(description='VQ-VAE')\n",
    "\n",
    "    # General\n",
    "    parser.add_argument('--data-folder', type=str,\n",
    "        help='name of the data folder')\n",
    "    parser.add_argument('--dataset', type=str,\n",
    "        help='name of the dataset (mnist, fashion-mnist, cifar10, miniimagenet)')\n",
    "\n",
    "    # Latent space\n",
    "    parser.add_argument('--hidden-size', type=int, default=256,\n",
    "        help='size of the latent vectors (default: 256)')\n",
    "    parser.add_argument('--k', type=int, default=512,\n",
    "        help='number of latent vectors (default: 512)')\n",
    "\n",
    "    # Optimization\n",
    "    parser.add_argument('--batch-size', type=int, default=128,\n",
    "        help='batch size (default: 128)')\n",
    "    parser.add_argument('--num-epochs', type=int, default=100,\n",
    "        help='number of epochs (default: 100)')\n",
    "    parser.add_argument('--lr', type=float, default=2e-4,\n",
    "        help='learning rate for Adam optimizer (default: 2e-4)')\n",
    "    parser.add_argument('--beta', type=float, default=1.0,\n",
    "        help='contribution of commitment loss, between 0.1 and 2.0 (default: 1.0)')\n",
    "\n",
    "    # Miscellaneous\n",
    "    parser.add_argument('--output-folder', type=str, default='vqvae',\n",
    "        help='name of the output folder (default: vqvae)')\n",
    "    parser.add_argument('--num-workers', type=int, default=mp.cpu_count() - 1,\n",
    "        help='number of workers for data loading (default: {0})'.format(mp.cpu_count() - 1))\n",
    "    parser.add_argument('--device', type=str, default='cpu',\n",
    "        help='set the device (cpu or cuda, default: cpu)')\n",
    "\n",
    "    args = parser.parse_args()\n",
    "\n",
    "    # Create logs and models folders; exist_ok avoids the check-then-create\n",
    "    # race of the previous os.path.exists guards.\n",
    "    os.makedirs('./logs', exist_ok=True)\n",
    "    os.makedirs('./models', exist_ok=True)\n",
    "    # Device: fall back to CPU when CUDA is unavailable.\n",
    "    args.device = torch.device(args.device\n",
    "        if torch.cuda.is_available() else 'cpu')\n",
    "    # Slurm: suffix the output folder with the job id to avoid clashes\n",
    "    # between concurrent jobs.\n",
    "    if 'SLURM_JOB_ID' in os.environ:\n",
    "        args.output_folder += '-{0}'.format(os.environ['SLURM_JOB_ID'])\n",
    "    os.makedirs('./models/{0}'.format(args.output_folder), exist_ok=True)\n",
    "    # Global step counter, incremented by train() via the args namespace.\n",
    "    args.steps = 0\n",
    "\n",
    "    main(args)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
