{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Three Ways of Storing and Accessing Lots of Images in Python\n",
    "\n",
    "@ysbecca\n",
    "\n",
    "\n",
    "First, loading the CIFAR-10 data into memory as numpy arrays."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loaded CIFAR-10 training set:\n",
      " - np.shape(images)      (0,)\n",
      " - np.shape(labels)      (0,)\n"
     ]
    }
   ],
   "source": [
    "import pickle\n",
    "from pathlib import Path\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "# Path to the unzipped CIFAR data\n",
    "data_dir = Path(\"data/cifar-10-batches-py/\")\n",
    "\n",
    "\n",
    "# Unpickle function provided by the CIFAR hosts\n",
    "def unpickle(file):\n",
    "    with open(file, \"rb\") as fo:\n",
    "        dict = pickle.load(fo, encoding=\"bytes\")\n",
    "    return dict\n",
    "\n",
    "\n",
    "images, labels = [], []\n",
    "for batch in data_dir.glob(\"data_batch_*\"):\n",
    "    batch_data = unpickle(batch)\n",
    "    for i, flat_im in enumerate(batch_data[b\"data\"]):\n",
    "        im_channels = []\n",
    "        # Each image is flattened, with channels in order of R, G, B\n",
    "        for j in range(3):\n",
    "            im_channels.append(\n",
    "                flat_im[j * 1024 : (j + 1) * 1024].reshape((32, 32))\n",
    "            )\n",
    "        # Reconstruct the original image\n",
    "        images.append(np.dstack((im_channels)))\n",
    "        # Save the label\n",
    "        labels.append(batch_data[b\"labels\"][i])\n",
    "\n",
    "print(\"Loaded CIFAR-10 training set:\")\n",
    "print(\" - np.shape(images)     \", np.shape(images))\n",
    "print(\" - np.shape(labels)     \", np.shape(labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Specifying the directories for storage using the different methods."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "\n",
    "disk_dir = Path(\"data/disk/\")\n",
    "lmdb_dir = Path(\"data/lmdb/\")\n",
    "hdf5_dir = Path(\"data/hdf5/\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Timing is measured with the standard library's `timeit` directly in the experiment cells below."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Imports required for the methods."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CIFAR_Image:\n",
    "    def __init__(self, image, label):\n",
    "        # Dimensions of image for reconstruction - not really necessary for this\n",
    "        # dataset, but some datasets may include images of varying sizes\n",
    "        self.channels = image.shape[2]\n",
    "        self.size = image.shape[:2]\n",
    "\n",
    "        self.image = image.tobytes()\n",
    "        self.label = label\n",
    "\n",
    "    def get_image(self):\n",
    "        \"\"\"Returns the image as a numpy array.\"\"\"\n",
    "        image = np.frombuffer(self.image, dtype=np.uint8)\n",
    "        return image.reshape(*self.size, self.channels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'h5py'",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mModuleNotFoundError\u001b[39m                       Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[4]\u001b[39m\u001b[32m, line 5\u001b[39m\n\u001b[32m      2\u001b[39m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mcsv\u001b[39;00m\n\u001b[32m      4\u001b[39m \u001b[38;5;66;03m# For HDF5\u001b[39;00m\n\u001b[32m----> \u001b[39m\u001b[32m5\u001b[39m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mh5py\u001b[39;00m\n\u001b[32m      7\u001b[39m \u001b[38;5;66;03m# For lmdb\u001b[39;00m\n\u001b[32m      8\u001b[39m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mlmdb\u001b[39;00m\n",
      "\u001b[31mModuleNotFoundError\u001b[39m: No module named 'h5py'"
     ]
    }
   ],
   "source": [
    "# For disk\n",
    "import csv\n",
    "\n",
    "# For HDF5\n",
    "import h5py\n",
    "\n",
    "# For lmdb\n",
    "import lmdb\n",
    "from PIL import Image\n",
    "\n",
    "\n",
    "def store_single_disk(image, image_id, label):\n",
    "    \"\"\"Stores a single image as a .png file on disk.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    image       image array, (32, 32, 3) to be stored\n",
    "    image_id    integer unique ID for image\n",
    "    label       image label\n",
    "    \"\"\"\n",
    "    Image.fromarray(image).save(disk_dir / f\"{image_id}.png\")\n",
    "\n",
    "    with open(disk_dir / f\"{image_id}.csv\", \"wt\") as csvfile:\n",
    "        writer = csv.writer(\n",
    "            csvfile,\n",
    "            delimiter=\" \",\n",
    "            quotechar=\"|\",\n",
    "            quoting=csv.QUOTE_MINIMAL,\n",
    "        )\n",
    "        writer.writerow([label])\n",
    "\n",
    "\n",
    "def store_single_lmdb(image, image_id, label):\n",
    "    \"\"\"Stores a single image to a LMDB.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    image       image array, (32, 32, 3) to be stored\n",
    "    image_id    integer unique ID for image\n",
    "    label       image label\n",
    "    \"\"\"\n",
    "\n",
    "    map_size = image.nbytes * 10\n",
    "\n",
    "    # Create a new LMDB environment\n",
    "    env = lmdb.open(str(lmdb_dir / \"single_lmdb\"), map_size=map_size)\n",
    "\n",
    "    # Start a new write transaction\n",
    "    with env.begin(write=True) as txn:\n",
    "        # All key-value pairs need to be strings\n",
    "        value = CIFAR_Image(image, label)\n",
    "        key = f\"{image_id:08}\"\n",
    "        txn.put(key.encode(\"ascii\"), pickle.dumps(value))\n",
    "    env.close()\n",
    "\n",
    "\n",
    "def store_single_hdf5(image, image_id, label):\n",
    "    \"\"\"Stores a single image to an HDF5 file.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    image       image array, (32, 32, 3) to be stored\n",
    "    image_id    integer unique ID for image\n",
    "    label       image label\n",
    "    \"\"\"\n",
    "\n",
    "    # Create a new HDF5 file\n",
    "    file = h5py.File(hdf5_dir / f\"{image_id}.h5\", \"w\")\n",
    "\n",
    "    # Create a dataset in the file\n",
    "    file.create_dataset(\n",
    "        \"image\",\n",
    "        np.shape(image),\n",
    "        h5py.h5t.STD_U8BE,\n",
    "        data=image,\n",
    "    )\n",
    "    file.create_dataset(\n",
    "        \"meta\",\n",
    "        np.shape(label),\n",
    "        h5py.h5t.STD_U8BE,\n",
    "        data=label,\n",
    "    )\n",
    "    file.close()\n",
    "\n",
    "\n",
    "_store_single_funcs = dict(\n",
    "    disk=store_single_disk,\n",
    "    lmdb=store_single_lmdb,\n",
    "    hdf5=store_single_hdf5,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Run the write single image experiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from timeit import timeit\n",
    "\n",
    "store_single_timings = dict()\n",
    "\n",
    "for method in (\"disk\", \"lmdb\", \"hdf5\"):\n",
    "    t = timeit(\n",
    "        \"_store_single_funcs[method](image, 0, label)\",\n",
    "        setup=\"image=images[0]; label=labels[0]\",\n",
    "        number=1,\n",
    "        globals=globals(),\n",
    "    )\n",
    "    store_single_timings[method] = t\n",
    "    print(f\"Method: {method}, Time usage: {t}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def store_many_disk(images, labels):\n",
    "    \"\"\"Stores an array of images to disk\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    images       images array, (N, 32, 32, 3) to be stored\n",
    "    labels       labels array, (N, 1) to be stored\n",
    "    \"\"\"\n",
    "    num_images = len(images)\n",
    "\n",
    "    # Save all the images one by one\n",
    "    for i, image in enumerate(images):\n",
    "        Image.fromarray(image).save(disk_dir / f\"{i}.png\")\n",
    "\n",
    "    # Save all the labels to the csv file\n",
    "    with open(disk_dir / f\"{num_images}.csv\", \"w\") as csvfile:\n",
    "        writer = csv.writer(\n",
    "            csvfile,\n",
    "            delimiter=\" \",\n",
    "            quotechar=\"|\",\n",
    "            quoting=csv.QUOTE_MINIMAL,\n",
    "        )\n",
    "        for label in labels:\n",
    "            # Remember that this typically would be more than just one value per row\n",
    "            writer.writerow([label])\n",
    "\n",
    "\n",
    "def store_many_lmdb(images, labels):\n",
    "    \"\"\"Stores an array of images to LMDB.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    images       images array, (N, 32, 32, 3) to be stored\n",
    "    labels       labels array, (N, 1) to be stored\n",
    "    \"\"\"\n",
    "    num_images = len(images)\n",
    "\n",
    "    map_size = num_images * images[0].nbytes * 10\n",
    "\n",
    "    # Create a new LMDB DB for all the images\n",
    "    env = lmdb.open(\n",
    "        str(lmdb_dir / f\"{num_images}_lmdb\"),\n",
    "        map_size=map_size,\n",
    "    )\n",
    "\n",
    "    # Same as before; but let's write all the images in a single transaction\n",
    "    with env.begin(write=True) as txn:\n",
    "        for i in range(num_images):\n",
    "            # All key-value pairs need to be Strings\n",
    "            value = CIFAR_Image(images[i], labels[i])\n",
    "            key = f\"{i:08}\"\n",
    "            txn.put(key.encode(\"ascii\"), pickle.dumps(value))\n",
    "    env.close()\n",
    "\n",
    "\n",
    "def store_many_hdf5(images, labels):\n",
    "    \"\"\"Stores an array of images to HDF5.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    images       images array, (N, 32, 32, 3) to be stored\n",
    "    labels       labels array, (N, 1) to be stored\n",
    "    \"\"\"\n",
    "    num_images = len(images)\n",
    "\n",
    "    # Create a new HDF5 file\n",
    "    file = h5py.File(hdf5_dir / f\"{num_images}_many.h5\", \"w\")\n",
    "\n",
    "    # Create a dataset in the file\n",
    "    file.create_dataset(\n",
    "        \"images\",\n",
    "        np.shape(images),\n",
    "        h5py.h5t.STD_U8BE,\n",
    "        data=images,\n",
    "    )\n",
    "    file.create_dataset(\n",
    "        \"meta\",\n",
    "        np.shape(labels),\n",
    "        h5py.h5t.STD_U8BE,\n",
    "        data=labels,\n",
    "    )\n",
    "    file.close()\n",
    "\n",
    "\n",
    "_store_many_funcs = dict(\n",
    "    disk=store_many_disk,\n",
    "    lmdb=store_many_lmdb,\n",
    "    hdf5=store_many_hdf5,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Run the multiple images experiment now."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cutoffs = [10, 100, 1000, 10000, 100000]\n",
    "\n",
    "# Let's double our images so that we have 100,000\n",
    "images = np.concatenate((images, images), axis=0)\n",
    "labels = np.concatenate((labels, labels), axis=0)\n",
    "\n",
    "print(np.shape(images))\n",
    "print(np.shape(labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from timeit import timeit\n",
    "\n",
    "store_many_timings = {\"disk\": [], \"lmdb\": [], \"hdf5\": []}\n",
    "\n",
    "for cutoff in cutoffs:\n",
    "    for method in (\"disk\", \"lmdb\", \"hdf5\"):\n",
    "        t = timeit(\n",
    "            \"_store_many_funcs[method](images_, labels_)\",\n",
    "            setup=\"images_=images[:cutoff]; labels_=labels[:cutoff]\",\n",
    "            number=1,\n",
    "            globals=globals(),\n",
    "        )\n",
    "        store_many_timings[method].append(t)\n",
    "\n",
    "        # Print out the method, cutoff, and elapsed time\n",
    "        print(f\"Method: {method}, Time usage: {t}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "store_many_timings"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's visualise those results."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "plt.rcParams.update({\"font.size\": 20})\n",
    "\n",
    "\n",
    "def plot_with_legend(\n",
    "    x_range,\n",
    "    y_data,\n",
    "    legend_labels,\n",
    "    x_label,\n",
    "    y_label,\n",
    "    title,\n",
    "    log=False,\n",
    "):\n",
    "    \"\"\"Displays a single plot with multiple datasets and matching legends.\n",
    "    Parameters:\n",
    "    --------------\n",
    "    x_range         list of lists containing x data\n",
    "    y_data          list of lists containing y values\n",
    "    legend_labels   list of string legend labels\n",
    "    x_label         x axis label\n",
    "    y_label         y axis label\n",
    "    \"\"\"\n",
    "    plt.style.use(\"seaborn-whitegrid\")\n",
    "    plt.figure(figsize=(10, 7))\n",
    "\n",
    "    if len(y_data) != len(legend_labels):\n",
    "        raise TypeError(\n",
    "            \"Error: the number of data sets does not match the number of labels provided.\"\n",
    "        )\n",
    "\n",
    "    all_plots = []\n",
    "    for data, label in zip(y_data, legend_labels, strict=False):\n",
    "        if log:\n",
    "            (temp,) = plt.loglog(x_range, data, label=label)\n",
    "        else:\n",
    "            (temp,) = plt.plot(x_range, data, label=label)\n",
    "        all_plots.append(temp)\n",
    "\n",
    "    plt.title(title)\n",
    "    plt.xlabel(x_label)\n",
    "    plt.ylabel(y_label)\n",
    "    plt.legend(handles=all_plots)\n",
    "    plt.figure(figsize=(20, 10))\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "disk_x = store_many_timings[\"disk\"]\n",
    "lmdb_x = store_many_timings[\"lmdb\"]\n",
    "hdf5_x = store_many_timings[\"hdf5\"]\n",
    "\n",
    "plot_with_legend(\n",
    "    cutoffs,\n",
    "    [disk_x, lmdb_x, hdf5_x],\n",
    "    [\"PNG files\", \"LMDB\", \"HDF5\"],\n",
    "    \"Number of images\",\n",
    "    \"Seconds to store\",\n",
    "    \"Storage time\",\n",
    "    log=False,\n",
    ")\n",
    "\n",
    "plot_with_legend(\n",
    "    cutoffs,\n",
    "    [disk_x, lmdb_x, hdf5_x],\n",
    "    [\"PNG files\", \"LMDB\", \"HDF5\"],\n",
    "    \"Number of images\",\n",
    "    \"Seconds to store\",\n",
    "    \"Log storage time\",\n",
    "    log=True,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Visualise how much memory is used."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Memory used in KB\n",
    "disk_mem = [24, 204, 2004, 20032, 200296]\n",
    "lmdb_mem = [60, 420, 4000, 39000, 393000]\n",
    "hdf5_mem = [36, 304, 2900, 29000, 293000]\n",
    "\n",
    "X = [disk_mem, lmdb_mem, hdf5_mem]\n",
    "\n",
    "ind = np.arange(3)\n",
    "width = 0.35\n",
    "\n",
    "plt.subplots(figsize=(8, 10))\n",
    "plots = [plt.bar(ind, [row[0] for row in X], width)]\n",
    "for i in range(1, len(cutoffs)):\n",
    "    plots.append(\n",
    "        plt.bar(\n",
    "            ind,\n",
    "            [row[i] for row in X],\n",
    "            width,\n",
    "            bottom=[row[i - 1] for row in X],\n",
    "        )\n",
    "    )\n",
    "\n",
    "plt.ylabel(\"Memory in KB\")\n",
    "plt.title(\"Disk memory used by method\")\n",
    "plt.xticks(ind, (\"PNG\", \"LMDB\", \"HDF5\"))\n",
    "plt.yticks(np.arange(0, 400000, 100000))\n",
    "\n",
    "plt.legend(\n",
    "    [plot[0] for plot in plots],\n",
    "    (\"10\", \"100\", \"1,000\", \"10,000\", \"100,000\"),\n",
    ")\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Read out a single image."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_single_disk(image_id):\n",
     "    \"\"\"Reads a single image from disk.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    image_id    integer unique ID for image\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    image       image array, (32, 32, 3) to be stored\n",
    "    label       associated meta data, int label\n",
    "    \"\"\"\n",
    "    image = np.array(Image.open(disk_dir / f\"{image_id}.png\"))\n",
    "\n",
    "    with open(disk_dir / f\"{image_id}.csv\", \"r\") as csvfile:\n",
    "        reader = csv.reader(\n",
    "            csvfile,\n",
    "            delimiter=\" \",\n",
    "            quotechar=\"|\",\n",
    "            quoting=csv.QUOTE_MINIMAL,\n",
    "        )\n",
    "        label = int(next(reader)[0])\n",
    "\n",
    "    return image, label\n",
    "\n",
    "\n",
    "def read_single_lmdb(image_id):\n",
     "    \"\"\"Reads a single image from LMDB.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    image_id    integer unique ID for image\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    image       image array, (32, 32, 3) to be stored\n",
    "    label       associated meta data, int label\n",
    "    \"\"\"\n",
    "\n",
    "    # Open the LMDB environment; see (1)\n",
    "    env = lmdb.open(str(lmdb_dir / \"single_lmdb\"), readonly=True)\n",
    "\n",
    "    # Start a new read transaction\n",
    "    with env.begin() as txn:\n",
    "        # Encode the key the same way as we stored it\n",
    "        data = txn.get(f\"{image_id:08}\".encode(\"ascii\"))\n",
    "        # Remember that it's a CIFAR_Image object that we get back out\n",
    "        cifar_image = pickle.loads(data)\n",
    "        # Retrieve the relevant bits\n",
    "        image = cifar_image.get_image()\n",
    "        label = cifar_image.label\n",
    "    env.close()\n",
    "\n",
    "    return image, label\n",
    "\n",
    "\n",
    "def read_single_hdf5(image_id):\n",
    "    \"\"\"Stores a single image to HDF5.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    image_id    integer unique ID for image\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    image       image array, (32, 32, 3) to be stored\n",
    "    label       associated meta data, int label\n",
    "    \"\"\"\n",
    "\n",
    "    # Open the HDF5 file\n",
    "    file = h5py.File(hdf5_dir / f\"{image_id}.h5\", \"r+\")\n",
    "\n",
    "    image = np.array(file[\"/image\"]).astype(\"uint8\")\n",
    "    label = int(np.array(file[\"/meta\"]).astype(\"uint8\"))\n",
    "\n",
    "    return image, label\n",
    "\n",
    "\n",
    "_read_single_funcs = dict(\n",
    "    disk=read_single_disk,\n",
    "    lmdb=read_single_lmdb,\n",
    "    hdf5=read_single_hdf5,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from timeit import timeit\n",
    "\n",
    "read_single_timings = dict()\n",
    "\n",
    "for method in (\"disk\", \"lmdb\", \"hdf5\"):\n",
    "    t = timeit(\n",
    "        \"_read_single_funcs[method](0)\",\n",
    "        setup=\"image=images[0]; label=labels[0]\",\n",
    "        number=1,\n",
    "        globals=globals(),\n",
    "    )\n",
    "    read_single_timings[method] = t\n",
    "    print(f\"Method: {method}, Time usage: {t}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "read_single_timings"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Reading in many images"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_many_disk(num_images):\n",
    "    \"\"\"Reads image from disk.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    num_images   number of images to read\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    images      images array, (N, 32, 32, 3) to be stored\n",
    "    labels      associated meta data, int label (N, 1)\n",
    "    \"\"\"\n",
    "    images, labels = [], []\n",
    "\n",
    "    # Loop over all IDs and read each image in one by one\n",
    "    for image_id in range(num_images):\n",
    "        images.append(np.array(Image.open(disk_dir / f\"{image_id}.png\")))\n",
    "\n",
    "    with open(disk_dir / f\"{num_images}.csv\", \"r\") as csvfile:\n",
    "        reader = csv.reader(\n",
    "            csvfile,\n",
    "            delimiter=\" \",\n",
    "            quotechar=\"|\",\n",
    "            quoting=csv.QUOTE_MINIMAL,\n",
    "        )\n",
    "        for row in reader:\n",
    "            labels.append(int(row[0]))\n",
    "    return images, labels\n",
    "\n",
    "\n",
    "def read_many_lmdb(num_images):\n",
    "    \"\"\"Reads image from LMDB.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    num_images   number of images to read\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    images      images array, (N, 32, 32, 3) to be stored\n",
    "    labels      associated meta data, int label (N, 1)\n",
    "    \"\"\"\n",
    "    images, labels = [], []\n",
    "    env = lmdb.open(str(lmdb_dir / f\"{num_images}_lmdb\"), readonly=True)\n",
    "\n",
    "    # Start a new read transaction\n",
    "    with env.begin() as txn:\n",
    "        # Read all images in one single transaction, with one lock\n",
    "        # We could split this up into multiple transactions if needed\n",
    "        for image_id in range(num_images):\n",
    "            data = txn.get(f\"{image_id:08}\".encode(\"ascii\"))\n",
    "            # Remember that it's a CIFAR_Image object that is stored as the value\n",
    "            cifar_image = pickle.loads(data)\n",
    "            # Retrieve the relevant bits\n",
    "            images.append(cifar_image.get_image())\n",
    "            labels.append(cifar_image.label)\n",
    "    env.close()\n",
    "    return images, labels\n",
    "\n",
    "\n",
    "def read_many_hdf5(num_images):\n",
    "    \"\"\"Reads image from HDF5.\n",
    "    Parameters:\n",
    "    ---------------\n",
    "    num_images   number of images to read\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    images      images array, (N, 32, 32, 3) to be stored\n",
    "    labels      associated meta data, int label (N, 1)\n",
    "    \"\"\"\n",
    "    images, labels = [], []\n",
    "\n",
    "    # Open the HDF5 file\n",
    "    file = h5py.File(hdf5_dir / f\"{num_images}_many.h5\", \"r+\")\n",
    "\n",
    "    images = np.array(file[\"/images\"]).astype(\"uint8\")\n",
    "    labels = np.array(file[\"/meta\"]).astype(\"uint8\")\n",
    "\n",
    "    return images, labels\n",
    "\n",
    "\n",
    "_read_many_funcs = dict(\n",
    "    disk=read_many_disk,\n",
    "    lmdb=read_many_lmdb,\n",
    "    hdf5=read_many_hdf5,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from timeit import timeit\n",
    "\n",
    "read_many_timings = {\"disk\": [], \"lmdb\": [], \"hdf5\": []}\n",
    "\n",
    "for cutoff in cutoffs:\n",
    "    for method in (\"disk\", \"lmdb\", \"hdf5\"):\n",
    "        t = timeit(\n",
    "            \"_read_many_funcs[method](num_images)\",\n",
    "            setup=\"num_images=cutoff\",\n",
    "            number=1,\n",
    "            globals=globals(),\n",
    "        )\n",
    "        read_many_timings[method].append(t)\n",
    "\n",
    "        # Print out the method, cutoff, and elapsed time\n",
    "        print(f\"Method: {method}, No. images: {cutoff}, Time usage: {t}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "disk_x_r = read_many_timings[\"disk\"]\n",
    "lmdb_x_r = read_many_timings[\"lmdb\"]\n",
    "hdf5_x_r = read_many_timings[\"hdf5\"]\n",
    "\n",
    "plot_with_legend(\n",
    "    cutoffs,\n",
    "    [disk_x_r, lmdb_x_r, hdf5_x_r],\n",
    "    [\"PNG files\", \"LMDB\", \"HDF5\"],\n",
    "    \"Number of images\",\n",
    "    \"Seconds to read\",\n",
    "    \"Read time\",\n",
    "    log=False,\n",
    ")\n",
    "\n",
    "plot_with_legend(\n",
    "    cutoffs,\n",
    "    [disk_x_r, lmdb_x_r, hdf5_x_r],\n",
    "    [\"PNG files\", \"LMDB\", \"HDF5\"],\n",
    "    \"Number of images\",\n",
    "    \"Seconds to read\",\n",
    "    \"Log read time\",\n",
    "    log=True,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Compare with the write times."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_with_legend(\n",
    "    cutoffs,\n",
    "    [disk_x_r, lmdb_x_r, hdf5_x_r, disk_x, lmdb_x, hdf5_x],\n",
    "    [\n",
    "        \"Read PNG\",\n",
    "        \"Read LMDB\",\n",
    "        \"Read HDF5\",\n",
    "        \"Write PNG\",\n",
    "        \"Write LMDB\",\n",
    "        \"Write HDF5\",\n",
    "    ],\n",
    "    \"Number of images\",\n",
    "    \"Seconds\",\n",
    "    \"Store and Read Times\",\n",
    "    log=False,\n",
    ")\n",
    "\n",
    "plot_with_legend(\n",
    "    cutoffs,\n",
    "    [disk_x_r, lmdb_x_r, hdf5_x_r, disk_x, lmdb_x, hdf5_x],\n",
    "    [\n",
    "        \"Read PNG\",\n",
    "        \"Read LMDB\",\n",
    "        \"Read HDF5\",\n",
    "        \"Write PNG\",\n",
    "        \"Write LMDB\",\n",
    "        \"Write HDF5\",\n",
    "    ],\n",
    "    \"Number of images\",\n",
    "    \"Seconds\",\n",
    "    \"Log Store and Read Times\",\n",
    "    log=True,\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
