{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "name": "Mutual_Information_Maximization_for_Representation_Learning.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "private_outputs": true,
      "collapsed_sections": [
        "3qHQzV6LhRsT",
        "4FJW4sNNg6Vr",
        "noACZM4kiBGq",
        "LGCSUC4r-tfW",
        "o16VFAvt_i_H",
        "q35Q4nvrB_fJ",
        "Ui2VAr9UBOns",
        "PKcYpomeCoUI",
        "5FbkpQ1dCZcq"
      ],
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "ULndVKL12doD"
      },
      "source": [
        "##### Copyright 2019 Google LLC.\n",
        "\n",
        "Licensed under the Apache License, Version 2.0 (the \"License\")"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "9Mye5mzaTuxo",
        "colab": {}
      },
      "source": [
        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "# https://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License."
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "h8Pcn3MPt4W6"
      },
      "source": [
        "# Basic setup\n",
        "\n",
        "**About this Colab**<br /> This is a companion Colab for the paper:\n",
        "\n",
        "*On Mutual Information Maximization for Representation Learning*<br />\n",
        "Michael Tschannen\\*, Josip Djolonga\\*, Paul Rubenstein, Sylvain Gelly, Mario Lucic\n",
        "\n",
        "The Colab can be used to visualize precomputed results or to rerun the experiments reported in the paper.\n",
        "\n",
        "**Running the experiments**<br />\n",
        "By default, the precomputed results will be loaded, but individual experiments can be run with the Colab by checking the `RUN_EXPERIMENTS` checkbox below. The batch size used in the paper was 128 and we average over 20 runs. With one run, the entire set of experiments will complete in ~2 hours. For multiple runs we suggest copying the code and running a stand-alone version. If you wish to run the experiments within the Colab, make sure you execute all cells in the \"Setup\" section."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "YmT88ebujJHC",
        "colab": {}
      },
      "source": [
        "#@title Imports, configurations, and helper functions { display-mode: \"form\" }\n",
        "from __future__ import division\n",
        "from __future__ import print_function\n",
        "\n",
        "import collections\n",
        "import copy\n",
        "import functools\n",
        "import itertools\n",
        "import os\n",
        "import pickle\n",
        "\n",
        "from matplotlib import pyplot as plt\n",
        "from matplotlib.ticker import FuncFormatter\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "from scipy.ndimage import gaussian_filter1d\n",
        "import seaborn as sns\n",
        "import tensorflow as tf\n",
        "from tensorflow.python.ops.parallel_for import gradients\n",
        "import tensorflow_datasets as tfds\n",
        "import tensorflow_probability as tfp\n",
        "import sklearn.linear_model as sk_linear\n",
        "\n",
        "\n",
        "slim = tf.contrib.slim\n",
        "tfb = tfp.bijectors\n",
        "tfd = tfp.distributions\n",
        "tfkl = tf.keras.layers\n",
        "\n",
        "tf.keras.backend.clear_session()\n",
        "\n",
        "ResultsConfig = collections.namedtuple(\n",
        "    \"ResultsConfig\", [\"nets\", \"critic\", \"loss\"])\n",
        "\n",
        "Results = collections.namedtuple(\n",
        "    'Results',\n",
        "    ['iterations', 'training_losses', 'testing_losses',\n",
        "     'classification_accuracies', 'singular_values'])\n",
        "\n",
        "ResultsAdversarial = collections.namedtuple(\n",
        "    \"ResultsAdversarial\",\n",
        "    [\"losses_e\", \"losses_c\", \"classification_accuracies\", \"iters\"]\n",
        ")\n",
        "\n",
        "ResultsSamplingIssues = collections.namedtuple(\n",
        "    \"ResultsSamplingIssues\", [\"mi_true\", \"nce_estimates_noniid\", \n",
        "                              \"nce_estimates_iid\", \"nwj_estimates_noniid\", \n",
        "                              \"nwj_estimates_iid\"])\n",
        "\n",
        "def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n",
        "  \"\"\"Convert results class to a data frame.\"\"\"\n",
        "  label = \"{}, {}, {}\".format(nets, critic, loss)\n",
        "  rows = list(\n",
        "      zip(\n",
        "          itertools.repeat(exp_name),\n",
        "          itertools.repeat(nets),\n",
        "          itertools.repeat(critic),\n",
        "          itertools.repeat(loss),\n",
        "          itertools.repeat(seed),\n",
        "          result.iterations,\n",
        "          [-loss for loss in result.testing_losses],  # Loss -> bound.\n",
        "          result.classification_accuracies,\n",
        "          itertools.repeat(label)))\n",
        "  df_eval = pd.DataFrame(\n",
        "      rows,\n",
        "      columns=(\"exp_name\", \"nets\", \"Critic\", \"Estimator\",\n",
        "               \"run\", \"iteration\", \"bound_value\", \"accuracy\", \"label\"))\n",
        "\n",
        "  df_eval[\"Estimator\"] = df_eval[\"Estimator\"].replace(\n",
        "      to_replace={\n",
        "          \"nce\": \"$I_{NCE}$\",\n",
        "          \"nwj\": \"$I_{NWJ}$\"\n",
        "      })\n",
        "  df_eval[\"Critic\"] = df_eval[\"Critic\"].replace(\n",
        "      to_replace={\n",
        "          \"concat\": \"MLP\",\n",
        "          \"separable\": \"Separable\",\n",
        "          \"innerprod\": \"Inner product\",\n",
        "          \"bilinear\": \"Bilinear\"\n",
        "      })\n",
        "  return df_eval\n",
        "\n",
        "\n",
        "def apply_default_style(ax):\n",
        "  ax.set_xlim([0, 20001])\n",
        "  ax.get_xaxis().set_major_formatter(\n",
        "      FuncFormatter(lambda x, p: format(int(x/1000), ',')))\n",
        "  ax.set_xlabel(\"Training steps (in thousands)\")\n",
        "  plt.tick_params(top=False, right=False, bottom=False, left=False)\n",
        "  handles, labels = ax.get_legend_handles_labels()\n",
        "  plt.legend(loc=\"lower right\", handles=handles[1:], labels=labels[1:])\n",
        "\n",
        "FONTSIZE = 15 \n",
        "sns.set_style(\"whitegrid\")\n",
        "plt.rcParams.update({'axes.labelsize': FONTSIZE,\n",
        "                     'xtick.labelsize': FONTSIZE,\n",
        "                     'ytick.labelsize': FONTSIZE,\n",
        "                     'legend.fontsize': FONTSIZE})\n",
        "\n",
        "NRUNS = 1 #@param { type: \"slider\", min: 1, max: 20, step: 1}\n",
        "TRAIN_BATCH_SIZE = 128 #@param { type: \"slider\", min: 64, max: 128, step: 64}\n",
        "RUN_EXPERIMENTS = False #@param { type: \"boolean\"}\n",
        "DIMS = 784\n",
        "\n",
        "def get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):\n",
        "  total_loss = 0\n",
        "  for i in range(0, x_array.shape[0], batch_size):\n",
        "    x_slice = x_array[i:i+batch_size, :dims]\n",
        "    total_loss += x_slice.shape[0] * session.run(loss,\n",
        "                                                 feed_dict={data_ph: x_slice})\n",
        "  return total_loss / x_array.shape[0]\n",
        "\n",
        "def get_classification_accuracy(session, codes, data_ph, dims):\n",
        "  x_train_mapped = map_data(x_train, session, codes, data_ph, dims)\n",
        "  x_test_mapped = map_data(x_test, session, codes, data_ph, dims)\n",
        "  accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)\n",
        "  return accuracy\n",
        "\n",
        "def map_data(x_array, session, codes, data_ph, dims, batch_size=512):\n",
        "  x_mapped = []\n",
        "  for i in range(0, x_array.shape[0], batch_size):\n",
        "    x_mapped.append(\n",
        "        session.run(codes,\n",
        "                    feed_dict={data_ph: x_array[i:i+batch_size, :dims]}))\n",
        "  return np.concatenate(x_mapped, axis=0)\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "pLbfDqH5tRUQ",
        "colab": {}
      },
      "source": [
        "#@title Import bounds implemented by Poole et al. (2019) { display-mode: \"form\" }\n",
        "# From https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb \n",
        "\n",
        "def reduce_logmeanexp_nodiag(x, axis=None):\n",
        "  batch_size = x.shape[0].value\n",
        "  logsumexp = tf.reduce_logsumexp(x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)\n",
        "  if axis:\n",
        "    num_elem = batch_size - 1.\n",
        "  else:\n",
        "    num_elem  = batch_size * (batch_size - 1.)\n",
        "  return logsumexp - tf.math.log(num_elem)\n",
        "\n",
        "def tuba_lower_bound(scores, log_baseline=None):\n",
        "  if log_baseline is not None:\n",
        "    scores -= log_baseline[:, None]\n",
        "  batch_size = tf.cast(scores.shape[0], tf.float32)\n",
        "  # First term is an expectation over samples from the joint,\n",
        "  # which are the diagonal elements of the scores matrix.\n",
        "  joint_term = tf.reduce_mean(tf.linalg.diag_part(scores))\n",
        "  # Second term is an expectation over samples from the marginal,\n",
        "  # which are the off-diagonal elements of the scores matrix.\n",
        "  marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))\n",
        "  return 1. + joint_term -  marg_term\n",
        "\n",
        "def nwj_lower_bound(scores):\n",
        "  # equivalent to: tuba_lower_bound(scores, log_baseline=1.)\n",
        "  return tuba_lower_bound(scores - 1.) \n",
        "\n",
        "def infonce_lower_bound(scores):\n",
        "  \"\"\"InfoNCE lower bound from van den Oord et al. (2018).\"\"\"\n",
        "  nll = tf.reduce_mean(tf.linalg.diag_part(scores) - tf.reduce_logsumexp(scores, axis=1))\n",
        "  # Alternative implementation:\n",
        "  # nll = -tf.nn.sparse_softmax_cross_entropy_with_logits(logits=scores, labels=tf.range(batch_size))\n",
        "  mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll\n",
        "  return mi"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "pCuLIr7jcD_o",
        "colab": {}
      },
      "source": [
        "#@title Define the linear evaluation protocol { display-mode: \"form\" }\n",
        "\n",
        "def logistic_fit(x_train, y_train, x_test, y_test):\n",
        "  logistic_regressor = sk_linear.LogisticRegression(\n",
        "      solver='saga', multi_class='multinomial', tol=.1, C=10.)\n",
        "  from sklearn.preprocessing import MinMaxScaler\n",
        "  scaler = MinMaxScaler()\n",
        "  x_train = scaler.fit_transform(x_train)\n",
        "  x_test = scaler.transform(x_test)\n",
        "  logistic_regressor.fit(x_train, y_train.ravel())\n",
        "  return logistic_regressor.score(x_test, y_test.ravel())"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "NlcUu6NGkYAa",
        "colab": {}
      },
      "source": [
        "#@title Define and load the dataset, check baseline in pixel space { display-mode: \"form\" }\n",
        "\n",
        "tf.reset_default_graph()\n",
        "\n",
        "TFDS_NAME = \"mnist\"\n",
        "FEATURE_INPUT = \"image\"\n",
        "FEATURE_LABEL = \"label\"\n",
        "N_CLASSES = 10\n",
        "\n",
        "DIMS = 784  # Total dimensions after flattening.\n",
        "\n",
        "def map_fn(example):\n",
        "  image = example[FEATURE_INPUT]\n",
        "  image = tf.cast(image, tf.float32) / 255.0\n",
        "  image = tf.reshape(image, [-1])  # Flatten.\n",
        "  label = example[FEATURE_LABEL]\n",
        "  return {FEATURE_INPUT: image, FEATURE_LABEL: label}\n",
        "\n",
        "def load_data(split):\n",
        "  return (tfds.load(TFDS_NAME, split=split)\n",
        "              .cache()\n",
        "              .map(map_func=map_fn)\n",
        "              .shuffle(1000))\n",
        "  \n",
        "def tfds_to_np(dataset):\n",
        "  features = list(tfds.as_numpy(dataset))\n",
        "  images = np.stack([f[FEATURE_INPUT].ravel() for f in features])\n",
        "  labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])\n",
        "  return images, labels\n",
        "\n",
        "dataset_train = load_data(\"train\")\n",
        "dataset_test = load_data(\"test\")\n",
        "x_train, y_train = tfds_to_np(dataset_train)\n",
        "x_test, y_test = tfds_to_np(dataset_test)\n",
        "tf.reset_default_graph()\n",
        "\n",
        "x_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)\n",
        "x_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)\n",
        "print(\"Fit on half the pixels: {}. It should be around 0.835.\".format(\n",
        "    logistic_fit(x_train_noisy[:, :DIMS//2], y_train,\n",
        "                 x_test_noisy[:, :DIMS//2], y_test)))\n",
        "\n",
        "def processed_train_data(dims, batch_size):\n",
        "  dataset = load_data(\"train\")\n",
        "  dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)\n",
        "  get_next = dataset_batched.make_one_shot_iterator().get_next()\n",
        "  features = get_next[FEATURE_INPUT]\n",
        "  labels = get_next[FEATURE_LABEL]\n",
        "  x_1, x_2 = tf.split(features, [dims, DIMS-dims], axis=-1)\n",
        "  return x_1, x_2, labels"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "3qHQzV6LhRsT"
      },
      "source": [
        "## Encoders\n",
        "\n",
        "Here we define the encoder architectures, namely MLP and ConvNet."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "XqRJ4jSspi4o",
        "colab": {}
      },
      "source": [
        "class MLP(tf.keras.Model):\n",
        "  def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):\n",
        "      super(MLP, self).__init__()\n",
        "      self._layers = [tfkl.Dense(dimensions, **dense_kwargs)\n",
        "                     for dimensions in layer_dimensions[:-1]]\n",
        "      dense_kwargs_copy = copy.deepcopy(dense_kwargs)\n",
        "      dense_kwargs_copy[\"activation\"] = None\n",
        "      self._layers.append(tfkl.Dense(layer_dimensions[-1], **dense_kwargs_copy))\n",
        "      self._shortcuts = shortcuts\n",
        "\n",
        "  @property\n",
        "  def layers(self):\n",
        "    return self._layers\n",
        "\n",
        "  def __call__(self, inputs):\n",
        "    x = inputs\n",
        "    for layer in self.layers:\n",
        "      x = layer(x) + x if self._shortcuts else layer(x)\n",
        "    return x\n",
        "\n",
        "\n",
        "# LayerNorm implementation copied from\n",
        "# https://stackoverflow.com/questions/39095252/fail-to-implement-layer-normalization-with-keras\n",
        "class LayerNorm(tfkl.Layer):\n",
        "\n",
        "    \"\"\" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 \"\"\"\n",
        "    def __init__(self, scale_initializer='ones', bias_initializer='zeros',\n",
        "                 axes=[1,2,3], epsilon=1e-6, **kwargs):\n",
        "        super(LayerNorm, self).__init__(**kwargs)\n",
        "        self.epsilon = epsilon\n",
        "        self.scale_initializer = tf.keras.initializers.get(scale_initializer)\n",
        "        self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n",
        "        self.axes = axes\n",
        "\n",
        "    def build(self, input_shape):\n",
        "        self.scale = self.add_weight(shape=(input_shape[-1],),\n",
        "                                     initializer=self.scale_initializer,\n",
        "                                     trainable=True,\n",
        "                                     name='{}_scale'.format(self.name))\n",
        "        self.bias = self.add_weight(shape=(input_shape[-1],),\n",
        "                                    initializer=self.bias_initializer,\n",
        "                                    trainable=True,\n",
        "                                    name='{}_bias'.format(self.name))\n",
        "        self.built = True\n",
        "\n",
        "    def call(self, x, mask=None):\n",
        "        mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)\n",
        "        std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)\n",
        "        norm = (x - mean) * (1/(std + self.epsilon))\n",
        "        return norm * self.scale + self.bias\n",
        "\n",
        "    def compute_output_shape(self, input_shape):\n",
        "        return input_shape\n",
        "\n",
        "\n",
        "class ConvNet(tf.keras.Sequential):\n",
        "  def __init__(self, channels=64, kernel_size=5, input_dim=DIMS//2, output_dim=100,\n",
        "               activation=tf.nn.relu):\n",
        "      # Note: This works only for the specific data set considered here.\n",
        "      super(ConvNet, self).__init__([\n",
        "        tfkl.Reshape((14, 28, 1), input_shape=(input_dim,)),\n",
        "        tfkl.Conv2D(channels, kernel_size, strides=2,\n",
        "                    padding=\"same\", activation=activation),\n",
        "        tfkl.Conv2D(2*channels, kernel_size, strides=2,\n",
        "                    padding=\"same\", activation=activation),\n",
        "        LayerNorm(),\n",
        "        tfkl.GlobalAveragePooling2D(),\n",
        "        tfkl.Dense(output_dim),\n",
        "      ])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "4FJW4sNNg6Vr"
      },
      "source": [
        "### Custom RealNVP\n",
        "\n",
        "We make two small modifications to the standard RealNVP implementation (highlighted with comments with **). Due to numerical instability we replace the exp with a softplus. The exp is normally used because it makes calculation of the log-det-jacobian simple, which is necessary for many applications of normalizing flows. In our setting, we only care about the architecture being invertible. This is still satisfied with our modification."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "VlNPS3KojJFa",
        "colab": {}
      },
      "source": [
        "from tensorflow_probability.python.internal import tensorshape_util\n",
        "import tensorflow.compat.v1 as tf1\n",
        "from tensorflow_probability.python.bijectors import affine_scalar\n",
        "from tensorflow_probability.python.bijectors import bijector as bijector_lib\n",
        "\n",
        "# Modified from tensorflow_probability/python/bijectors/real_nvp.py\n",
        "class RealNVP(bijector_lib.Bijector):\n",
        "  def __init__(self,\n",
        "               num_masked,\n",
        "               shift_and_log_scale_fn=None,\n",
        "               bijector_fn=None,\n",
        "               is_constant_jacobian=False,\n",
        "               validate_args=False,\n",
        "               name=None):\n",
        "    name = name or 'real_nvp'\n",
        "    if num_masked < 0:\n",
        "      raise ValueError('num_masked must be a non-negative integer.')\n",
        "    self._num_masked = num_masked\n",
        "    # At construction time, we don't know input_depth.\n",
        "    self._input_depth = None\n",
        "    if bool(shift_and_log_scale_fn) == bool(bijector_fn):\n",
        "      raise ValueError('Exactly one of `shift_and_log_scale_fn` and '\n",
        "                       '`bijector_fn` should be specified.')\n",
        "    if shift_and_log_scale_fn:\n",
        "      def _bijector_fn(x0, input_depth, **condition_kwargs):\n",
        "        shift, log_scale = shift_and_log_scale_fn(x0, input_depth,\n",
        "                                                  **condition_kwargs)\n",
        "        # ** First modification is here.\n",
        "        return affine_scalar.AffineScalar(shift=shift, scale=log_scale)\n",
        "\n",
        "      bijector_fn = _bijector_fn\n",
        "\n",
        "    if validate_args:\n",
        "      bijector_fn = _validate_bijector_fn(bijector_fn)\n",
        "\n",
        "    # Still do this assignment for variable tracking.\n",
        "    self._shift_and_log_scale_fn = shift_and_log_scale_fn\n",
        "    self._bijector_fn = bijector_fn\n",
        "\n",
        "    super(RealNVP, self).__init__(\n",
        "        forward_min_event_ndims=1,\n",
        "        is_constant_jacobian=is_constant_jacobian,\n",
        "        validate_args=validate_args,\n",
        "        name=name)\n",
        "\n",
        "  def _cache_input_depth(self, x):\n",
        "    if self._input_depth is None:\n",
        "      self._input_depth = tf.compat.dimension_value(\n",
        "          tensorshape_util.with_rank_at_least(x.shape, 1)[-1])\n",
        "      if self._input_depth is None:\n",
        "        raise NotImplementedError(\n",
        "            'Rightmost dimension must be known prior to graph execution.')\n",
        "      if self._num_masked >= self._input_depth:\n",
        "        raise ValueError(\n",
        "            'Number of masked units must be smaller than the event size.')\n",
        "\n",
        "  def _forward(self, x, **condition_kwargs):\n",
        "    self._cache_input_depth(x)\n",
        "    x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n",
        "    y1 = self._bijector_fn(x0, self._input_depth - self._num_masked,\n",
        "                           **condition_kwargs).forward(x1)\n",
        "    y = tf.concat([x0, y1], axis=-1)\n",
        "    return y\n",
        "\n",
        "  def _inverse(self, y, **condition_kwargs):\n",
        "    self._cache_input_depth(y)\n",
        "    y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n",
        "    x1 = self._bijector_fn(y0, self._input_depth - self._num_masked,\n",
        "                           **condition_kwargs).inverse(y1)\n",
        "    x = tf.concat([y0, x1], axis=-1)\n",
        "    return x\n",
        "\n",
        "  def _forward_log_det_jacobian(self, x, **condition_kwargs):\n",
        "    self._cache_input_depth(x)\n",
        "    x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]\n",
        "    return self._bijector_fn(x0, self._input_depth - self._num_masked,\n",
        "                             **condition_kwargs).forward_log_det_jacobian(\n",
        "                                 x1, event_ndims=1)\n",
        "\n",
        "  def _inverse_log_det_jacobian(self, y, **condition_kwargs):\n",
        "    self._cache_input_depth(y)\n",
        "    y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]\n",
        "    return self._bijector_fn(y0, self._input_depth - self._num_masked,\n",
        "                             **condition_kwargs).inverse_log_det_jacobian(\n",
        "                                 y1, event_ndims=1)\n",
        "\n",
        "def real_nvp_default_template(hidden_layers,\n",
        "                              shift_only=False,\n",
        "                              activation=tf.nn.relu,\n",
        "                              name=None,\n",
        "                              *args,  # pylint: disable=keyword-arg-before-vararg\n",
        "                              **kwargs):\n",
        "  with tf.name_scope(name or 'real_nvp_default_template'):\n",
        "\n",
        "    def _fn(x, output_units, **condition_kwargs):\n",
        "      \"\"\"Fully connected MLP parameterized via `real_nvp_template`.\"\"\"\n",
        "      if condition_kwargs:\n",
        "        raise NotImplementedError(\n",
        "            'Conditioning not implemented in the default template.')\n",
        "\n",
        "      if tensorshape_util.rank(x.shape) == 1:\n",
        "        x = x[tf.newaxis, ...]\n",
        "        reshape_output = lambda x: x[0]\n",
        "      else:\n",
        "        reshape_output = lambda x: x\n",
        "      for units in hidden_layers:\n",
        "        x = tf1.layers.dense(\n",
        "            inputs=x,\n",
        "            units=units,\n",
        "            activation=activation,\n",
        "            *args,  # pylint: disable=keyword-arg-before-vararg\n",
        "            **kwargs)\n",
        "      x = tf1.layers.dense(\n",
        "          inputs=x,\n",
        "          units=(1 if shift_only else 2) * output_units,\n",
        "          activation=None,\n",
        "          *args,  # pylint: disable=keyword-arg-before-vararg\n",
        "          **kwargs)\n",
        "      if shift_only:\n",
        "        return reshape_output(x), None\n",
        "      shift, log_scale = tf.split(x, 2, axis=-1)\n",
        "       # ** Here is the second modification.\n",
        "      return reshape_output(shift), 1e-7 + tf.nn.softplus(reshape_output(log_scale))\n",
        "\n",
        "    return tf1.make_template('real_nvp_default_template', _fn)\n",
        "\n",
        "class RealNVPBijector(tf.keras.Model):\n",
        "  def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):\n",
        "    super(RealNVPBijector, self).__init__()\n",
        "    permutations = [np.random.permutation(dimensions)\n",
        "                    for _ in range(n_couplings)]\n",
        "    bijectors = []\n",
        "    for permutation in permutations:\n",
        "      bijectors.append(RealNVP(\n",
        "        dimensions // 2,\n",
        "        real_nvp_default_template(hidden_layers, **dense_kwargs)))\n",
        "      bijectors.append(tfb.Permute(permutation))\n",
        "    self._bijector = tfb.Chain(bijectors)\n",
        "\n",
        "  def call(self, inputs):\n",
        "    return self._bijector.forward(inputs)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "Ax0hgIwkhplg"
      },
      "source": [
        "## Critics\n",
        "\n",
        "Here we define the critic architectures, namely the inner product, bilinear, concat, and separable critics."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "vRguW9Fyc7uO",
        "colab": {}
      },
      "source": [
        "class InnerProdCritic(tf.keras.Model):\n",
        "  def call(self, x, y):\n",
        "    return tf.matmul(x, y, transpose_b=True)\n",
        "\n",
        "class BilinearCritic(tf.keras.Model):\n",
        "  def __init__(self, feature_dim=100, **kwargs):\n",
        "    super(BilinearCritic, self).__init__(**kwargs)\n",
        "    self._W = tfkl.Dense(feature_dim, use_bias=False)\n",
        "\n",
        "  def call(self, x, y):\n",
        "    return tf.matmul(x, self._W(y), transpose_b=True)\n",
        "\n",
        "# Copied from\n",
        "# https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb\n",
        "class ConcatCritic(tf.keras.Model):\n",
        "  def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):\n",
        "    super(ConcatCritic, self).__init__(**kwargs)\n",
        "    # output is scalar score\n",
        "    self._f = MLP([hidden_dim for _ in range(layers)]+[1], False, {\"activation\": \"relu\"})\n",
        "\n",
        "  def call(self, x, y):\n",
        "    batch_size = tf.shape(x)[0]\n",
        "    # Tile all possible combinations of x and y\n",
        "    x_tiled = tf.tile(x[None, :],  (batch_size, 1, 1))\n",
        "    y_tiled = tf.tile(y[:, None],  (1, batch_size, 1))\n",
        "    # xy is [batch_size * batch_size, x_dim + y_dim]\n",
        "    xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2),\n",
        "                          [batch_size * batch_size, -1])\n",
        "    # Compute scores for each x_i, y_j pair.\n",
        "    scores = self._f(xy_pairs) \n",
        "    return tf.transpose(tf.reshape(scores, [batch_size, batch_size]))\n",
        "\n",
        "\n",
        "class SeparableCritic(tf.keras.Model):\n",
        "  def __init__(self, hidden_dim=100, output_dim=100, layers=1,\n",
        "               activation='relu', **kwargs):\n",
        "    super(SeparableCritic, self).__init__(**kwargs)\n",
        "    self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {\"activation\": activation})\n",
        "    self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {\"activation\": activation})\n",
        "\n",
        "  def call(self, x, y):\n",
        "    x_mapped = self._f_x(x)\n",
        "    y_mapped = self._f_y(y)\n",
        "    return tf.matmul(x_mapped, y_mapped, transpose_b=True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "DKhwL9cdYQ3L"
      },
      "source": [
        "# Experiments"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "noACZM4kiBGq"
      },
      "source": [
        "## Training loop for Section 3.1 - 3.3 in the paper\n",
        "\n",
        "Classic training loop where we update the encoder (and possibly the critic) and evaluate the model on test data."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "B9sIQHE-znsE",
        "colab": {}
      },
      "source": [
        "def train(g1,\n",
        "          g2,\n",
        "          critic,\n",
        "          loss_fn,\n",
        "          learning_rate=1e-4,\n",
        "          batch_size=TRAIN_BATCH_SIZE,\n",
        "          n_iters=15000,\n",
        "          n_evals=15,\n",
        "          compute_jacobian=False,\n",
        "          noise_std=0.0,\n",
        "          data_dimensions=DIMS//2):\n",
        "  \"\"\"Runs the training loop for a fixed model.\n",
        "\n",
        "  Args:\n",
        "    g1: Function, maps input1 to representation.\n",
        "    g2: Function, maps input2 to representation.\n",
        "    critic: Function, maps two representations to scalar.\n",
        "    loss_fn: Function, mutual information estimator.\n",
        "    learning_rate: Learning rate.\n",
        "    batch_size: Training batch size.\n",
        "    n_iters: Number of optimization iterations.\n",
        "    n_evals: Number of model evaluations.\n",
        "    compute_jacobian: Whether to estimate the singular values of the Jacobian.\n",
        "    noise_std: Standard deviation for the Gaussian noise. Default is 0.0.\n",
        "    data_dimensions: The dimension of the data. By default it's half of the\n",
        "      original data dimension.\n",
        "  Returns:\n",
        "    Returns an instance of `Results` tuple.\n",
        "  \"\"\"\n",
        "  x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)\n",
        "\n",
        "  if noise_std > 0.0:\n",
        "    assert x_1.shape == x_2.shape, \"X1 and X2 shapes must agree to add noise!\"\n",
        "    noise = noise_std * tf.random.normal(x_1.shape)\n",
        "    x_1 += noise\n",
        "    x_2 += noise\n",
        "\n",
        "  # Compute the representations.\n",
        "  code_1, code_2 = g1(x_1), g2(x_2)\n",
        "  critic_matrix = critic(code_1, code_2)\n",
        "\n",
        "  # Compute the Jacobian of g1 if needed.\n",
        "  if compute_jacobian:\n",
        "    jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)\n",
        "    singular_values = tf.linalg.svd(jacobian, compute_uv=False)\n",
        "\n",
        "  # Optimizer setup.\n",
        "  loss = loss_fn(critic_matrix)\n",
        "  optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
        "  optimizer_op = optimizer.minimize(loss)\n",
        "\n",
        "  with tf.Session() as session:\n",
        "    session.run(tf.global_variables_initializer())\n",
        "\n",
        "    # Subgraph for eval (add noise to input if necessary)\n",
        "    data_ph = tf.placeholder(tf.float32, shape=[None, data_dimensions])\n",
        "    data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(data_ph))\n",
        "    codes = g1(data_ph_noisy)\n",
        "\n",
        "    training_losses, testing_losses, classification_accuracies, iters, sigmas \\\n",
        "      = [], [], [], [], []\n",
        "    # Main training loop.\n",
        "    for iter_n in range(n_iters):\n",
        "      # Evaluate the model performance.\n",
        "      if iter_n % (n_iters // n_evals) == 0:\n",
        "        iters.append(iter_n)\n",
        "        accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)\n",
        "        classification_accuracies.append(accuracy)\n",
        "        testing_losses.append(\n",
        "            get_testing_loss(x_test, session, loss, data_ph, data_dimensions))\n",
        "        if compute_jacobian:\n",
        "          sigmas.append(session.run(singular_values))\n",
        "        print(\"Step {:>10d} fit {:>.5f}\".format(iter_n, accuracy))\n",
        "      # Run one optimization step.\n",
        "      loss_np, _ = session.run([loss, optimizer_op])\n",
        "      training_losses.append(loss_np)\n",
        "\n",
        "  return Results(iterations=iters,\n",
        "                 training_losses=training_losses,\n",
        "                 testing_losses=testing_losses,\n",
        "                 classification_accuracies=classification_accuracies,\n",
        "                 singular_values=sigmas)\n",
        "\n",
        "\n",
        "def run_sweep(nets, critics, loss_fns, exp_name, **kwargs):\n",
        "  \"\"\"Runs the sweep across encoder networks, critics, and the estimators.\"\"\"\n",
        "  grid = itertools.product(nets, critics, loss_fns)\n",
        "  data_frames = []\n",
        "  results_with_singular_values = []\n",
        "  for nets_name, critic_name, loss_name in grid:\n",
        "    print(\"[New experiment] encoder: {}, critic: {}, loss: {}\".format(\n",
        "        nets_name, critic_name, loss_name))\n",
        "    with tf.Graph().as_default():\n",
        "      g1, g2 = nets[nets_name]()\n",
        "      critic = critics[critic_name]()\n",
        "      loss_fn = loss_fns[loss_name]\n",
        "      results_per_run = []\n",
        "      for n in range(NRUNS):\n",
        "        try:\n",
        "          results = train(g1, g2, critic, loss_fn, **kwargs)\n",
        "          results_per_run.append(results)\n",
        "        except Exception as ex:\n",
        "          print(\"Run {} failed! Error: {}\".format(n, ex))\n",
        "      for i, result in enumerate(results_per_run):\n",
        "        data_frames.append(convert_to_data_frame(\n",
        "            result, exp_name, nets_name, critic_name, loss_name, i))\n",
        "      if kwargs.get('compute_jacobian', False):\n",
        "        results_with_singular_values.append((\n",
        "            ResultsConfig(nets_name, critic_name, loss_name), results_per_run\n",
        "        ))\n",
        "  \n",
        "  return {\n",
        "      \"df\": pd.concat(data_frames), \n",
        "      \"singular_values\": results_with_singular_values\n",
        "  }"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "LGCSUC4r-tfW"
      },
      "source": [
        "## Maximized MI and improved downstream performance"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "gUXTDLsq-xuF"
      },
      "source": [
        "Reproduces the first experiment of Section 3.1 and the corresponding Figures 1 (a, b).\n",
        "\n",
        "In this experiment we use invertible architectures. We show that training to maximize the MI estimators results in improved downstream performance, even though MI is maximized for any parameter setting (due to invertibility)."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "5XTz-B4zs8d7",
        "colab": {}
      },
      "source": [
        "#@title Run experiment or load precomputed results { display-mode: \"form\" }\n",
        "def run_all_experiments():\n",
        "  tf.reset_default_graph()\n",
        "  infonce_loss = lambda x: -infonce_lower_bound(x)\n",
        "  nwj_loss = lambda x: -nwj_lower_bound(x)\n",
        "  loss_fcts = {\n",
        "      \"nwj\": nwj_loss,\n",
        "      \"nce\": infonce_loss\n",
        "  }\n",
        "  kwargs = dict(\n",
        "      shift_only=True,\n",
        "      activation=lambda x: tf.nn.relu(x),\n",
        "      kernel_initializer=tf.initializers.truncated_normal(stddev=0.0001),\n",
        "      bias_initializer='zeros')\n",
        "  nets = {\n",
        "      \"realnvp\": lambda: (\n",
        "          RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs),\n",
        "          RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)\n",
        "          )\n",
        "      }\n",
        "  critics = {\n",
        "      \"bilinear\": lambda: BilinearCritic(feature_dim=DIMS//2),\n",
        "  }\n",
        "  return run_sweep(nets, critics, loss_fcts, \"invertible\", n_iters=21000, n_evals=21)\n",
        "\n",
        "if RUN_EXPERIMENTS:\n",
        "  data_invertible = run_all_experiments()[\"df\"]\n",
        "else:\n",
        "  !wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl\n",
        "  data_invertible = pd.read_pickle('mi_results.pkl')\n",
        "  data_invertible = data_invertible[data_invertible.exp_name == \"invertible\"]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "8rp-GrKEli6j",
        "colab": {}
      },
      "source": [
        "#@title Downstream accuracy plot { display-mode: \"form\" }\n",
        "data = data_invertible[data_invertible.Critic.isin([\"Bilinear\"])]\n",
        "\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data, x=\"iteration\", y=\"accuracy\", hue=\"Estimator\", ci=\"sd\");\n",
        "apply_default_style(ax)\n",
        "ax.set_ylabel(\"Accuracy\");"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "yAjY5DbSTmwa",
        "colab": {}
      },
      "source": [
        "#@title MI lower bound plot { display-mode: \"form\" }\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data, x=\"iteration\", y=\"bound_value\", hue=\"Estimator\", ci=\"sd\",);\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim(-5, 8)\n",
        "ax.set_ylabel(\"$I_{EST}$\");"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "o16VFAvt_i_H"
      },
      "source": [
        "## Maximized MI and worsened downstream performance"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "FvoPb7qV_nWB"
      },
      "source": [
        "Reproduces the second experiment of Section 3.1 and the corresponding Figure 1 (c). \n",
        "\n",
        "In this experiment we use invertible architectures. By adversarially training an encoder, we show that it is possible to significantly deteriorate downstream performance, even though MI is maximized for any parameter setting (due to invertibility)."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "-KWO_-UEzf1u",
        "colab": {}
      },
      "source": [
        "#@title Define training loop { display-mode: \"form\" }\n",
        "\n",
        "def train_adversarial(net,\n",
        "                      learning_rate=1e-3,\n",
        "                      batch_size=TRAIN_BATCH_SIZE,\n",
        "                      n_iters=4000,\n",
        "                      record_every=400,\n",
        "                      data_dimension=DIMS//2):\n",
        "  \"\"\"Runs the adversarial training loop for a fixed model.\n",
        "\n",
        "  Args:\n",
        "    net: Function, maps input to representation.\n",
        "    learning_rate: Learning rate.\n",
        "    batch_size: Training batch size.\n",
        "    n_iters: Number of optimization iterations.\n",
        "    record_every: Evaluate the model every `record_every` steps.\n",
        "    data_dimension: The dimension of the data. By default it's half of the\n",
        "      original data dimension.\n",
        "  Returns:\n",
        "    Returns an instance of `ResultsAdversarial` tuple.\n",
        "  \"\"\"\n",
        "\n",
        "\n",
        "  if net.__class__ is not RealNVPBijector:\n",
        "    raise ValueError(\"Only implemented for the RealNVP class.\")\n",
        "\n",
        "  # Get the data and compute the representation.\n",
        "  x_1, _, labels = processed_train_data(data_dimension, batch_size)\n",
        "  code = net(x_1)\n",
        "\n",
        "  with tf.variable_scope(\"classifier\"):\n",
        "    logits = tf.layers.dense(code, N_CLASSES)\n",
        "\n",
        "  # True classification loss for linear classifier.\n",
        "  loss_c = tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
        "      logits=logits, labels=labels)\n",
        "  loss_c = tf.reduce_mean(loss_c)\n",
        "\n",
        "  # Fake classification loss for the encoder.\n",
        "  labels_unif = (1 / N_CLASSES) * tf.ones(logits.shape)\n",
        "  loss_e = tf.nn.softmax_cross_entropy_with_logits(\n",
        "      logits=logits, labels=labels_unif)\n",
        "  loss_e = tf.reduce_mean(loss_e)\n",
        "\n",
        "  # Setup the optimizers.\n",
        "  vars_e = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"real_nvp\")\n",
        "  vars_c = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"classifier\")\n",
        "\n",
        "  optimizer_c = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
        "  optimizer_e = tf.train.AdamOptimizer(learning_rate=learning_rate*0.01)\n",
        "\n",
        "  optimizer_op_c = optimizer_c.minimize(loss_c, var_list=vars_c)\n",
        "  optimizer_op_e = optimizer_e.minimize(loss_e, var_list=vars_e)\n",
        "\n",
        "\n",
        "  with tf.Session() as session:\n",
        "    session.run(tf.global_variables_initializer())\n",
        "    data_ph = tf.placeholder(tf.float32, shape=[None, data_dimension])\n",
        "    codes = net(data_ph)\n",
        "    losses_c, losses_e, classification_accuracies, iters = [], [], [], []\n",
        "\n",
        "    # Warm-up the linear classifier.\n",
        "    for _ in range(1000):\n",
        "      session.run([loss_c, optimizer_op_c])\n",
        "\n",
        "    # Run main loop.\n",
        "    for iter_n in range(n_iters):\n",
        "      if iter_n % record_every == 0:\n",
        "        iters.append(iter_n)\n",
        "        accuracy = get_classification_accuracy(\n",
        "            session, codes, data_ph, data_dimension)\n",
        "        classification_accuracies.append(accuracy)\n",
        "        print(\"Step {:>10d} fit {:>.5f}\".format(iter_n, accuracy))\n",
        "\n",
        "      # Run 10 optimization steps for the classifier.\n",
        "      for _ in range(10):\n",
        "        loss_c_np, _ = session.run([loss_c, optimizer_op_c])\n",
        "      # Run 1 optimization step for the encoder.\n",
        "      loss_e_np, _ = session.run([loss_e, optimizer_op_e])\n",
        "      losses_c.append(loss_c_np)\n",
        "      losses_e.append(loss_e_np)\n",
        "      if iter_n % 100 == 0:\n",
        "        print(\"  loss_e {:>.5f} loss_c {:>.5f}\".format(loss_e_np, loss_c_np))\n",
        "\n",
        "  return ResultsAdversarial(\n",
        "      losses_e=losses_e,\n",
        "      losses_c=losses_c,\n",
        "      classification_accuracies=classification_accuracies,\n",
        "      iters=iters)\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "KRJ-EdmiV7U2",
        "colab": {}
      },
      "source": [
        "#@title Run experiment or load precomputed results { display-mode: \"form\" }\n",
        "\n",
        "def run_all_experiments():\n",
        "  tf.reset_default_graph()\n",
        "  kwargs = dict(activation=lambda x: tf.nn.relu(x),\n",
        "                      kernel_initializer=tf.initializers.truncated_normal(stddev=0.0001),\n",
        "                      bias_initializer='zeros')\n",
        "  net = RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)\n",
        "  return train_adversarial(\n",
        "      net, learning_rate=1e-3, n_iters=4001, \n",
        "      record_every=400, data_dimension=DIMS//2, batch_size=128)\n",
        "\n",
        "if RUN_EXPERIMENTS:\n",
        "  data_adversarial = run_all_experiments()\n",
        "else:\n",
        "  !wget -q -N https://storage.googleapis.com/mi_for_rl_files/adversarial_results.pkl\n",
        "  with tf.gfile.Open('adversarial_results.pkl', 'rb') as f:\n",
        "    data_adversarial = pickle.load(f, encoding='latin1')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "-JMOE6ca_HWy",
        "colab": {}
      },
      "source": [
        "#@title Downstream accuracy plot { display-mode: \"form\" }\n",
        "plt.figure()\n",
        "plt.plot(data_adversarial.iters, data_adversarial.classification_accuracies, linewidth=2)\n",
        "ax = plt.gca()\n",
        "ax.set_ylabel(\"Accuracy\")\n",
        "apply_default_style(ax)\n",
        "ax.set_xlim([0, 4001])\n",
        "ax.set_xticklabels([0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4])\n",
        "leg = plt.legend([\"Adversarially trained\\ninvertible encoder\"], prop={'size':14});"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "q35Q4nvrB_fJ"
      },
      "source": [
        "## Bias towards hard-to-invert encoders"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "8lwsU3qhB_fW"
      },
      "source": [
        "Reproduces the third experiment of Section 3.1 and the corresponding Figures 2 (a, b, c).\n",
        "\n",
        "In this experiment we use architectures that can be both invertible and non-invertible. We show that training to maximize the MI estimators results in the encoders becoming hard to invert, in that they become locally ill-conditioned."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "-b2rZuh5WjPH",
        "colab": {}
      },
      "source": [
        "#@title Run experiment or load precomputed results { display-mode: \"form\" }\n",
        "\n",
        "def run_all_experiments():\n",
        "  tf.reset_default_graph()\n",
        "  infonce_loss = lambda x: -infonce_lower_bound(x)\n",
        "  nwj_loss = lambda x: -nwj_lower_bound(x)\n",
        "  loss_fcts = {\n",
        "      \"nwj\": nwj_loss,\n",
        "      \"nce\": infonce_loss\n",
        "  }\n",
        "  kwargs = dict(activation=\"relu\",\n",
        "                kernel_initializer=tf.initializers.truncated_normal(stddev=0.0001),\n",
        "                bias_initializer=\"zeros\")\n",
        "  nets = {\n",
        "      \"mlp\": lambda: (\n",
        "          MLP([DIMS // 2] * 5, shortcuts=True, dense_kwargs=kwargs),\n",
        "          MLP([DIMS // 2] * 5, shortcuts=True, dense_kwargs=kwargs))\n",
        "  }\n",
        "  critics = {\n",
        "      \"bilinear\": lambda: BilinearCritic(feature_dim=DIMS//2),\n",
        "  }\n",
        "  return run_sweep(nets, critics, loss_fcts,\"non_invertible\",\n",
        "                   n_iters=21000, n_evals=21, compute_jacobian=True,\n",
        "                   noise_std=0.05)\n",
        "\n",
        "if RUN_EXPERIMENTS:\n",
        "  all_results = run_all_experiments()\n",
        "  data_non_invertible = all_results[\"df\"]\n",
        "  non_invertible_singular_values = all_results[\"singular_values\"]\n",
        "\n",
        "else:\n",
        "  data_non_invertible = pd.read_pickle('mi_results.pkl')\n",
        "  data_non_invertible = data_non_invertible[data_non_invertible.exp_name == \"non_invertible\"]\n",
        "\n",
        "  !wget -q -N https://storage.googleapis.com/mi_for_rl_files/condition_numbers_results.pkl\n",
        "  with tf.gfile.Open('condition_numbers_results.pkl', 'rb') as f:\n",
        "    non_invertible_singular_values = pickle.load(f, encoding='latin1')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "J5ebcwAP8Ssp",
        "colab": {}
      },
      "source": [
        "#@title Downstream accuracy plot { display-mode: \"form\" }\n",
        "data = data_non_invertible[data_non_invertible.Critic.isin([\"Bilinear\"])]\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data, x=\"iteration\", y=\"accuracy\", hue=\"Estimator\", ci=\"sd\");\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim([0.83, 0.9])\n",
        "ax.set_ylabel(\"Accuracy\");"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "kCz_wexf2WgK",
        "colab": {}
      },
      "source": [
        "#@title MI lower bound plot { display-mode: \"form\" }\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data, x=\"iteration\", y=\"bound_value\", hue=\"Estimator\", ci=\"sd\");\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim(-5, 8)\n",
        "ax.set_ylabel(\"$I_{EST}$\");"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "d7LUJtv4rVqy",
        "colab": {}
      },
      "source": [
        "#@title Jacobian condition number plot { display-mode: \"form\" }\n",
        "\n",
        "colors = sns.color_palette()\n",
        "\n",
        "def percentile_plot(log_condition_number, iters, pcs = 5):\n",
        "  \"\"\"Create percentile plot.\"\"\"\n",
        "  sorted_log_condition_number = np.sort(log_condition_number, axis=1)\n",
        "  n_iters, n_condition_numbers = sorted_log_condition_number.shape\n",
        "  percentiles = [i / pcs for i in range(pcs + 1)]\n",
        "  pc_idx = [int(p * (n_condition_numbers - 1)) for p in percentiles]\n",
        "\n",
        "  alpha = (\n",
        "      [i / (pcs / 2) for i in range(pcs//2 + 1)] +\n",
        "      [2 - (i / (pcs / 2)) for i in range(pcs//2 + 1, pcs + 1)]\n",
        "  )\n",
        "  alpha = [a + 0.05 for a in alpha]\n",
        "  alpha = [a / max(alpha) for a in alpha]\n",
        "\n",
        "  plt.plot(\n",
        "    iters[: n_iters], \n",
        "    sorted_log_condition_number[:, pc_idx[0]],\n",
        "    color=\"gray\", alpha=1, lw=2,\n",
        "    label=\"Minimum\",\n",
        "    linestyle=\"--\"\n",
        "  )\n",
        "  for i in range(len(pc_idx) - 1):\n",
        "    p1, p2 = pc_idx[i], pc_idx[i + 1]\n",
        "    plt.fill_between(\n",
        "        iters[: n_iters], \n",
        "        sorted_log_condition_number[:, p1],\n",
        "        sorted_log_condition_number[:, p2],\n",
        "        color=colors[i], alpha=0.75)\n",
        "    if i != 4:\n",
        "      plt.plot(\n",
        "        iters[: n_iters], \n",
        "        sorted_log_condition_number[:, pc_idx[i + 1]],\n",
        "        color=colors[i], alpha=1, lw=2,\n",
        "        label=\"%.0fth perc.\" % ((100/pcs) * (i+1)),\n",
        "        linestyle=\"--\"\n",
        "        )\n",
        "  plt.plot(\n",
        "    iters[: n_iters], \n",
        "    sorted_log_condition_number[:, pc_idx[-1]],\n",
        "    color=colors[4], alpha=1, lw=2,\n",
        "    label=\"Maximum\",\n",
        "    linestyle=\"--\"\n",
        "  )\n",
        "\n",
        "  apply_default_style(plt.gca())\n",
        "\n",
        "# As the Jacobian singular values are a batch_size x input_dim matrix per\n",
        "# iteration, we need a separate routine to extract the corresponding data\n",
        "# and aggregate it\n",
        "def aggregate_singular_values(configs_and_results):\n",
        "  data_eval = {}\n",
        "  for (config, results_all_runs) in configs_and_results:\n",
        "    label = \"{}, {}, {}\".format(config.nets, config.critic, config.loss)\n",
        "    condition_numbers_runs = []\n",
        "    for run_number, results in enumerate(results_all_runs):\n",
        "      stacked_singular_values = np.stack(results.singular_values)\n",
        "      sorted_singular_values = np.sort(stacked_singular_values, axis=-1)\n",
        "      log_condition_numbers = np.log(sorted_singular_values[..., -1]) \\\n",
        "                              - np.log(sorted_singular_values[..., 0])\n",
        "      condition_numbers_runs.append(log_condition_numbers)\n",
        "    if len(results_all_runs) > 0:\n",
        "      iterations = results_all_runs[0].iterations\n",
        "      condition_numbers = np.concatenate(condition_numbers_runs, axis=1)\n",
        "      data_eval[label] = (iterations, condition_numbers)\n",
        "  return data_eval\n",
        "\n",
        "results_dict = aggregate_singular_values(non_invertible_singular_values)\n",
        "\n",
        "for key in results_dict.keys():\n",
        "  if \"bilinear\" not in key:\n",
        "    continue\n",
        "  plt.figure()\n",
        "  its = results_dict[key][0]\n",
        "  condnbrs = results_dict[key][1]\n",
        "  percentile_plot(condnbrs, its)\n",
        "  if \"nce\" in key:\n",
        "    plt.ylabel(r\"Jacobian, $log\\ (\\sigma_1\\ /\\ \\sigma_{392})$\");\n",
        "    plt.legend(loc=\"lower right\", ncol=2)\n",
        "    plt.ylim([0, 8])\n",
        "  else:\n",
        "    plt.ylabel(\"Jacobian, $log\\ (\\sigma_1\\ /\\ \\sigma_{392})$\");\n",
        "    plt.legend(loc=\"upper left\", ncol=2)\n",
        "    plt.ylim([0, 8])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "Ui2VAr9UBOns"
      },
      "source": [
        "## Looser bounds with simpler critics can lead to better representations"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "Ysurx0zUBhs0"
      },
      "source": [
        "Reproduces the experiment from Section 3.2 and the corresponding Figure 3.\n",
        "\n",
        "In this experiment we investigate the effect of the critic architecture on downstream performance. We find that simpler critics can result in better performance, despite leading to looser MI bounds."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "3yuppcWqXEBN",
        "colab": {}
      },
      "source": [
        "#@title Run experiment or load precomputed results { display-mode: \"form\" }\n",
        "\n",
        "def run_all_experiments():\n",
        "  tf.reset_default_graph()\n",
        "  infonce_loss = lambda x: -infonce_lower_bound(x)\n",
        "  nwj_loss = lambda x: -nwj_lower_bound(x)\n",
        "  loss_fcts = {\"nwj\": nwj_loss, \"nce\": infonce_loss}\n",
        "  nets = {\n",
        "      \"mlp\": lambda: (MLP([300, 300, 100], False, {\"activation\": \"relu\"}),\n",
        "                      MLP([300, 300, 100], False, {\"activation\": \"relu\"})),\n",
        "  }\n",
        "  critics = {\n",
        "      \"concat\": lambda: ConcatCritic(),\n",
        "      \"bilinear\": lambda: BilinearCritic(),\n",
        "      \"separable\": lambda: SeparableCritic(layers=1),\n",
        "  }\n",
        "  return run_sweep(\n",
        "    nets, critics, loss_fcts, \"critic_impact\", n_iters=21000, n_evals=21)\n",
        "\n",
        "if RUN_EXPERIMENTS:\n",
        "  data_critic_impact = run_all_experiments()[\"df\"]\n",
        "else:\n",
        "  data_critic_impact = pd.read_pickle('mi_results.pkl')\n",
        "  data_critic_impact = data_critic_impact[data_critic_impact.exp_name == \"critic_impact\"]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "TyJax82YWPpT",
        "colab": {}
      },
      "source": [
        "#@title Downstream accuracy plot { display-mode: \"form\" }\n",
        "data = data_critic_impact\n",
        "data = data[data.Critic.isin([\"Bilinear\", \"MLP\", \"Separable\"])]\n",
        "data_nwj = data[data.Estimator.isin([\"$I_{NWJ}$\"])]\n",
        "data_nce = data[data.Estimator.isin([\"$I_{NCE}$\"])]\n",
        "\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data_nwj, x=\"iteration\", y=\"accuracy\", hue=\"Critic\", ci=\"sd\");\n",
        "apply_default_style(ax)\n",
        "ax.set_ylabel(\"Accuracy with $I_{NWJ}$\")\n",
        "ax.set_ylim([0.8, 0.9])\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data_nce, x=\"iteration\", y=\"accuracy\", hue=\"Critic\", ci=\"sd\");\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim([0.8, 0.9])\n",
        "ax.set_ylabel(\"Accuracy with $I_{NCE}$\");"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "PjneQuoFjM1G",
        "colab": {}
      },
      "source": [
        "#@title MI lower bound plot { display-mode: \"form\" }\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data_nwj, x=\"iteration\", y=\"bound_value\", hue=\"Critic\", ci=\"sd\");\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim(2)\n",
        "plt.ylabel(\"$I_{NWJ}$\")\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data_nce, x=\"iteration\", y=\"bound_value\", hue=\"Critic\", ci=\"sd\");\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim(2)\n",
        "ax.set_ylabel(\"$I_{NCE}$\");"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "PKcYpomeCoUI"
      },
      "source": [
        "## Encoder architecture can be more important than the specific estimator"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "D7VEBx5UCpyp"
      },
      "source": [
        "Reproduces the experiment from Section 3.3 and the corresponding Figures 4 (a, b).\n",
        "\n",
        "In this experiment we show that the choice of encoder architecture can have more impact on downstream performance than the specific estimator used."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "vbvS4q1vXMlo",
        "colab": {}
      },
      "source": [
        "#@title Run experiment or load precomputed results { display-mode: \"form\" }\n",
        "\n",
        "def run_all_experiments():\n",
        "  tf.reset_default_graph()\n",
        "  loss_fcts = {}\n",
        "  loss_fcts_est = {'nwj': nwj_lower_bound, 'nce': infonce_lower_bound}\n",
        "\n",
        "  def loss_target_fn(x, fn, t):\n",
        "    return tf.abs(fn(x) - t)\n",
        "\n",
        "  for target in [4, 2]:\n",
        "    for loss_name, loss_fn in loss_fcts_est.items():\n",
        "      loss_fcts['{}-{}'.format(loss_name, target)] = functools.partial(\n",
        "          loss_target_fn, fn=loss_fn, t=target)\n",
        "\n",
        "  nets = {\n",
        "      \"convnet\": lambda: (ConvNet(), ConvNet()),\n",
        "      \"mlp\": lambda: (MLP([300, 300, 100], False, {\"activation\": \"relu\"}),\n",
        "                      MLP([300, 300, 100], False, {\"activation\": \"relu\"})),\n",
        "  }\n",
        "\n",
        "  critics = {\n",
        "      \"bilinear\": lambda: BilinearCritic(),\n",
        "  }\n",
        "  return run_sweep(nets, critics, loss_fcts, \"encoder_impact\", n_iters=21000,\n",
        "                   n_evals=21)\n",
        "\n",
        "if RUN_EXPERIMENTS:\n",
        "  data_encoder_impact = run_all_experiments()[\"df\"]\n",
        "else:\n",
        "  data_encoder_impact = pd.read_pickle('mi_results.pkl')\n",
        "  data_encoder_impact = data_encoder_impact[data_encoder_impact.exp_name == \"encoder_impact\"]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "VtgT2m2uwRfb",
        "colab": {}
      },
      "source": [
        "#@title Downstream accuracy and testing loss plots { display-mode: \"form\" }\n",
        "data = data_encoder_impact\n",
        "data = data[data.Critic == \"Bilinear\"].copy()\n",
        "\n",
        "# Map raw sweep labels to display labels. Assigning the result back avoids\n",
        "# Series.replace(inplace=True) on a column selection, which relies on\n",
        "# chained assignment and is unsafe under pandas copy-on-write semantics.\n",
        "data[\"label\"] = data[\"label\"].replace(to_replace={\n",
        "    \"convnet, bilinear, nwj-2\": \"ConvNet $(I_{NWJ}, t=2)$\",\n",
        "    \"convnet, bilinear, nwj-4\": \"ConvNet $(I_{NWJ}, t=4)$\",\n",
        "    \"mlp, bilinear, nwj-2\": \"MLP $(I_{NWJ}, t=2)$\",\n",
        "    \"mlp, bilinear, nwj-4\": \"MLP $(I_{NWJ}, t=4)$\",\n",
        "    \"convnet, bilinear, nce-2\": \"ConvNet $(I_{NCE}, t=2)$\",\n",
        "    \"convnet, bilinear, nce-4\": \"ConvNet $(I_{NCE}, t=4)$\",\n",
        "    \"mlp, bilinear, nce-2\": \"MLP $(I_{NCE}, t=2)$\",\n",
        "    \"mlp, bilinear, nce-4\": \"MLP $(I_{NCE}, t=4)$\",\n",
        "  })\n",
        "\n",
        "# We are trying to reach a given bound of t, hence it is minimized\n",
        "data[\"bound_value\"] *= -1\n",
        "data_nwj = data[data.Estimator.isin([\"nwj-2\", \"nwj-4\"])]\n",
        "data_nce = data[data.Estimator.isin([\"nce-2\", \"nce-4\"])]\n",
        "\n",
        "del data # Make sure that `data` is not used by accident below.\n",
        "\n",
        "hue_ordering = np.unique(data_nwj.label.values)\n",
        "\n",
        "plt.rcParams.update({'legend.fontsize': 13})\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data_nwj, x=\"iteration\", y=\"accuracy\", hue=\"label\", ci=\"sd\", hue_order=hue_ordering)\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim([0.78, 0.92])\n",
        "ax.set_ylabel(\"Accuracy with $I_{NWJ}$\")\n",
        "\n",
        "hue_ordering = np.unique(data_nce.label.values)\n",
        "\n",
        "plt.figure()\n",
        "ax = sns.lineplot(data=data_nce, x=\"iteration\", y=\"accuracy\", hue=\"label\", ci=\"sd\", hue_order=hue_ordering)\n",
        "apply_default_style(ax)\n",
        "ax.set_ylim([0.78, 0.92])\n",
        "ax.set_ylabel(\"Accuracy with $I_{NCE}$\");\n",
        "plt.rcParams.update({'legend.fontsize': FONTSIZE})\n",
        "\n",
        "# Loss values\n",
        "plt.figure()\n",
        "hue_ordering = np.unique(data_nwj.label.values)\n",
        "\n",
        "ax = sns.lineplot(data=data_nwj, x=\"iteration\", y=\"bound_value\", hue=\"label\", ci=\"sd\",\n",
        "                  hue_order=hue_ordering)\n",
        "apply_default_style(ax)\n",
        "# Drop the legend title entry (handles[0]/labels[0]) added by seaborn.\n",
        "handles, labels = ax.get_legend_handles_labels()\n",
        "plt.legend(loc=\"upper right\", handles=handles[1:], labels=labels[1:])\n",
        "ax.set_ylabel(\"$L_t(g_1, g_2), I_{NWJ}$\")\n",
        "\n",
        "plt.figure()\n",
        "hue_ordering = np.unique(data_nce.label.values)\n",
        "ax = sns.lineplot(data=data_nce, x=\"iteration\", y=\"bound_value\", hue=\"label\", ci=\"sd\",\n",
        "                  hue_order=hue_ordering)\n",
        "apply_default_style(ax)\n",
        "handles, labels = ax.get_legend_handles_labels()\n",
        "plt.legend(loc=\"upper right\", handles=handles[1:], labels=labels[1:])\n",
        "ax.set_ylabel(\"$L_t(g_1, g_2), I_{NCE}$\");"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "5FbkpQ1dCZcq"
      },
      "source": [
        "## InfoNCE and the importance of negative sampling"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "oFGeDtbYCaax"
      },
      "source": [
        "Reproduces the experiment in Section 4 and the corresponding Figure 4 (c).\n",
        "\n",
        "In this experiment we show empirically that both $I_{NCE}$ and $I_{NWJ}$ estimators are not in general lower bounds on MI when samples are not drawn iid."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "ass3uDp7tUDG"
      },
      "source": [
        "The InfoNCE objective is only provably a lower bound on the true mutual information if all of the samples $(X_i, Y_i)$ are drawn iid from the joint distribution $p(x,y)$. Here we demonstrate with a simple synthetic example that when the $(X_i, Y_i)$ are drawn in a dependent fashion, InfoNCE can actually be larger than the true mutual information.\n",
        "\n",
        "We will draw a batch $(X_i, Y_i)$ as follows.\n",
        "\n",
        "First sample $Z \\sim \\mathcal{N}\\left(0, \\begin{bmatrix}1 & -0.5\\\\ -0.5 & 1\\end{bmatrix}\\right)$.\n",
        "\n",
        "Then sample $\\epsilon_i \\sim \\mathcal{N}\\left(0, \\begin{bmatrix}1 & 0.9\\\\ 0.9 & 1\\end{bmatrix}\\right)$ iid, and set $(X_i, Y_i) = Z + \\epsilon_i$.\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "GRJtFmRptUDJ"
      },
      "source": [
        "Then each $(X_i, Y_i)$ has marginal distribution $\\mathcal{N}\\left(0, \\begin{bmatrix}2 & 0.4\\\\ 0.4 & 2\\end{bmatrix}\\right)$, but note that the samples within a batch are dependent.\n",
        "\n",
        "For a bivariate Gaussian $(X,Y) \\sim \\mathcal{N}\\left(0, \\Sigma\\right)$ we have that $I(X,Y) = -0.5\\log(1-\\rho^2)$  where $\\rho^2 = \\Sigma_{12}\\Sigma_{21} / \\Sigma_{11}\\Sigma_{22}$."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "EYPaInIktUDP",
        "colab": {}
      },
      "source": [
        "#@title Define training loop { display-mode: \"form\" }\n",
        "\n",
        "def train_bound(xy,\n",
        "                estimator=\"nce\",\n",
        "                hidden_dim=10,\n",
        "                layers=5,\n",
        "                learning_rate=1e-4,\n",
        "                n_iters=20000):\n",
        "  \"\"\"Estimates the MI lower-bound using a simple concat critic.\n",
        "\n",
        "  Args:\n",
        "    xy: [batch, 2] float tensor; column 0 is x, column 1 is y.\n",
        "    estimator: Which bound to train, 'nce' (InfoNCE) or 'nwj'.\n",
        "    hidden_dim: Hidden width of the critic network.\n",
        "    layers: Number of layers in the critic network.\n",
        "    learning_rate: Adam learning rate.\n",
        "    n_iters: Number of training steps.\n",
        "\n",
        "  Returns:\n",
        "    List of per-step bound estimates, one per training iteration.\n",
        "\n",
        "  Raises:\n",
        "    ValueError: If `estimator` is not 'nce' or 'nwj'.\n",
        "  \"\"\"\n",
        "\n",
        "  if estimator not in [\"nce\", \"nwj\"]:\n",
        "    raise ValueError(\n",
        "        \"estimator must be one of 'nce', 'nwj', not: {}\".format(estimator))\n",
        "\n",
        "  # The critic scores every (x_i, y_j) pair; both bounds below consume\n",
        "  # this score matrix.\n",
        "  critic = ConcatCritic(hidden_dim=hidden_dim, layers=layers, activation='relu')\n",
        "  scores = critic(xy[:, 0, None], xy[:, 1, None])\n",
        "\n",
        "  if estimator == \"nce\":\n",
        "    bound = infonce_lower_bound(scores)\n",
        "  else:\n",
        "    bound = nwj_lower_bound(scores)\n",
        "\n",
        "  # Optimizer setup. Maximize the bound by minimizing its negation.\n",
        "  optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
        "  optimizer_op = optimizer.minimize(-bound)\n",
        "\n",
        "  # Main training loop\n",
        "  with tf.Session() as session:\n",
        "    session.run(tf.global_variables_initializer())\n",
        "    bound_estimates = []\n",
        "    for iter_n in range(n_iters):\n",
        "      bound_np, _ = session.run([bound, optimizer_op])\n",
        "      bound_estimates.append(bound_np)\n",
        "      if iter_n % 1000 == 0:  # Progress log every 1000 steps.\n",
        "        print(\"Step {:>10d} {} {:>.5f}\".format(iter_n, estimator, bound_np))\n",
        "    return bound_estimates\n",
        "\n",
        "def mi_from_sigma(sigma):\n",
        "  \"\"\"Closed-form MI of a zero-mean bivariate Gaussian with covariance sigma.\n",
        "\n",
        "  I(X, Y) = -0.5 * log(1 - rho^2), where rho^2 = s12 * s21 / (s11 * s22).\n",
        "  \"\"\"\n",
        "  rho_sq = sigma[0,1] * sigma[1,0] / (sigma[0,0] * sigma[1,1])\n",
        "  return -0.5 * np.log(1 - (rho_sq))\n",
        "\n",
        "def run_all_experiments():\n",
        "  \"\"\"Trains NCE/NWJ bounds on dependent vs. i.i.d. batch samples.\n",
        "\n",
        "  NOTE: intentionally shadows the `run_all_experiments` defined for the\n",
        "  previous experiment; each experiment section redefines it.\n",
        "  \"\"\"\n",
        "\n",
        "  bs = TRAIN_BATCH_SIZE\n",
        "  sigma_z = np.array([[1.0, -0.5],[-0.5, 1.0]])\n",
        "  sigma_eps = np.array([[1.0, 0.9],[0.9, 1.0]], dtype=np.float64)\n",
        "\n",
        "  z = tfd.MultivariateNormalFullCovariance(loc=0, covariance_matrix=sigma_z)\n",
        "  eps = tfd.MultivariateNormalFullCovariance(loc=0, covariance_matrix=sigma_eps)\n",
        "\n",
        "  # A single z sample is shared by the whole batch, so the batch entries\n",
        "  # are dependent (non-i.i.d.) even though each has the correct marginal.\n",
        "  z_sample = tf.cast(z.sample(1), tf.float32)\n",
        "  eps_sample = tf.cast(eps.sample(bs), tf.float32)\n",
        "\n",
        "  xy = z_sample + eps_sample\n",
        "\n",
        "  # The marginal covariance of (X, Y) = Z + eps is sigma_z + sigma_eps.\n",
        "  mi_true = mi_from_sigma(sigma_eps + sigma_z)\n",
        "\n",
        "  # Let's estimate the MI using InfoNCE with our non-iid samples\n",
        "  nce_estimates = train_bound(xy, 'nce')\n",
        "\n",
        "  # We'll also estimate the MI using the NWJ estimator.\n",
        "  nwj_estimates = train_bound(xy, 'nwj')\n",
        "\n",
        "  # Now as a sanity check, let's also evaluate the InfoNCE estimator using proper iid samples\n",
        "  sigma_xy = sigma_z + sigma_eps\n",
        "\n",
        "  xy_iid = tfd.MultivariateNormalFullCovariance(\n",
        "      loc=0, covariance_matrix=sigma_xy).sample(bs)\n",
        "  xy_iid = tf.cast(xy_iid, tf.float32)\n",
        "\n",
        "  # Compute the estimates using IID samples.\n",
        "  nce_estimates_iid = train_bound(xy_iid, 'nce')\n",
        "  nwj_estimates_iid = train_bound(xy_iid, 'nwj')\n",
        "\n",
        "  return ResultsSamplingIssues(\n",
        "      mi_true=mi_true,\n",
        "      nce_estimates_noniid=nce_estimates,\n",
        "      nce_estimates_iid=nce_estimates_iid,\n",
        "      nwj_estimates_noniid=nwj_estimates,\n",
        "      nwj_estimates_iid=nwj_estimates_iid)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "-xeHsMXh05au",
        "colab": {}
      },
      "source": [
        "#@title Run experiment or load precomputed results { display-mode: \"form\" }\n",
        "if not RUN_EXPERIMENTS:\n",
        "  # Fetch the cached results; -N only re-downloads if the remote file is newer.\n",
        "  !wget -q -N https://storage.googleapis.com/mi_for_rl_files/noniid_results.pkl\n",
        "  # encoding='latin1' allows a Python-2 pickle to load under Python 3.\n",
        "  with tf.gfile.Open('noniid_results.pkl', 'rb') as f:\n",
        "    data_noniid_sampling = pickle.load(f, encoding='latin1')\n",
        "else:\n",
        "  data_noniid_sampling = run_all_experiments()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "X84YlGbctUDl",
        "colab": {}
      },
      "source": [
        "#@title i.i.d. vs non-i.i.d. sampling plot { display-mode: \"form\" }\n",
        "results = data_noniid_sampling\n",
        "steps = list(range(len(results.nce_estimates_iid)))\n",
        "\n",
        "plt.rcParams.update({'axes.labelsize': FONTSIZE,\n",
        "                     'xtick.labelsize': FONTSIZE,\n",
        "                     'ytick.labelsize': FONTSIZE,\n",
        "                     'legend.fontsize': FONTSIZE,\n",
        "                     'lines.linewidth': 2})\n",
        "plt.figure()\n",
        "ax = plt.gca()\n",
        "plt.axhline(y=results.mi_true, ls='--', color='k', label=\"True MI\")\n",
        "\n",
        "# Curves are smoothed with a Gaussian filter (sigma = 100 steps) for display.\n",
        "ax.plot(steps, gaussian_filter1d(results.nce_estimates_noniid, 100),\n",
        "        label=\"$I_{NCE}$, non-i.i.d. samples\")\n",
        "# Label fixed from \"I.i.d. samples\" for consistency with the other curves.\n",
        "ax.plot(steps, gaussian_filter1d(results.nce_estimates_iid, 100),\n",
        "        label=\"$I_{NCE}$, i.i.d. samples\")\n",
        "\n",
        "steps = list(range(len(results.nwj_estimates_iid)))\n",
        "\n",
        "ax.plot(steps, gaussian_filter1d(results.nwj_estimates_noniid, 100),\n",
        "        label=\"$I_{NWJ}$, non-i.i.d. samples\")\n",
        "ax.plot(steps, gaussian_filter1d(results.nwj_estimates_iid, 100),\n",
        "        label=\"$I_{NWJ}$, i.i.d. samples\")\n",
        "\n",
        "apply_default_style(ax)\n",
        "ax.set_ylabel('$I_{EST}$')\n",
        "ax.set_ylim(-0.3, 0.25)\n",
        "plt.legend(loc=\"lower right\", prop={'size':13})"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}