{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "dVNR5qk5mIA0"
      },
      "outputs": [],
      "source": [
        "# 'Licensed under the Apache License, Version 2.0'\n",
        "#@title Imports + Global Variables + Helpers\n",
        "\n",
        "import copy\n",
        "import random\n",
        "import itertools\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "import math\n",
        "import tensorflow as tf\n",
        "\n",
        "# Global MDP size and discount factor used by every cell below.\n",
        "num_states = 2\n",
        "num_actions = 2\n",
        "gamma = 0.9\n",
        "\n",
        "# Shared plotting style; `color` is the pale green of the value-function cloud.\n",
        "plt.style.use('classic')\n",
        "color = (152./255, 225./255, 152./255)\n",
        "\n",
        "# Adapted from:\n",
        "#https://stackoverflow.com/questions/42157781/block-diagonal-matrices-in-tensorflow\n",
        "# NOTE(review): relies on TF1 APIs (tf.Dimension arithmetic, tf.Session-era\n",
        "# shape handling); not TF2-compatible as written.\n",
        "def block_diagonal(matrices, dtype=tf.float64):\n",
        "  r\"\"\"Constructs block-diagonal matrices from a list of batched 2D tensors.\n",
        "\n",
        "  Args:\n",
        "    matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of\n",
        "      matrices with the same batch dimension).\n",
        "    dtype: Data type to use. The Tensors in `matrices` must match this dtype.\n",
        "  Returns:\n",
        "    A matrix with the input matrices stacked along its main diagonal, having\n",
        "    shape [..., \\sum_i N_i, \\sum_i M_i].\n",
        "\n",
        "  \"\"\"\n",
        "  matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]\n",
        "  # Accumulate the static (graph-construction-time) output shape.\n",
        "  blocked_rows = tf.Dimension(0)\n",
        "  blocked_cols = tf.Dimension(0)\n",
        "  batch_shape = tf.TensorShape(None)\n",
        "  for matrix in matrices:\n",
        "    full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n",
        "    batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n",
        "    blocked_rows += full_matrix_shape[-2]\n",
        "    blocked_cols += full_matrix_shape[-1]\n",
        "  # Total number of output columns, taken from the dynamic (runtime) shapes.\n",
        "  ret_columns_list = []\n",
        "  for matrix in matrices:\n",
        "    matrix_shape = tf.shape(matrix)\n",
        "    ret_columns_list.append(matrix_shape[-1])\n",
        "  ret_columns = tf.add_n(ret_columns_list)\n",
        "  # Pad each matrix with zeros left/right so its block lands on the diagonal.\n",
        "  row_blocks = []\n",
        "  current_column = 0\n",
        "  for matrix in matrices:\n",
        "    matrix_shape = tf.shape(matrix)\n",
        "    row_before_length = current_column\n",
        "    current_column += matrix_shape[-1]\n",
        "    row_after_length = ret_columns - current_column\n",
        "    row_blocks.append(tf.pad(\n",
        "        tensor=matrix,\n",
        "        paddings=tf.concat(\n",
        "            [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),\n",
        "             [(row_before_length, row_after_length)]],\n",
        "            axis=0)))\n",
        "  # Stack the padded rows and attach the static shape computed above.\n",
        "  blocked = tf.concat(row_blocks, -2)\n",
        "  blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n",
        "  return blocked"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "IJnmDHdLUmBk"
      },
      "outputs": [],
      "source": [
        "#@title Define the MDP\n",
        "# define random reward function\n",
        "r = np.random.uniform(-1, 1, num_states*num_actions)\n",
        "\n",
        "# define random transition function\n",
        "alphas = np.ones(num_states)\n",
        "P = np.zeros((num_states*num_actions, num_states))\n",
        "for sa in range(num_states*num_actions):\n",
        "  P[sa] = np.random.dirichlet(alphas) \n",
        "\n",
        "# NOTE: the random r and P above are immediately overwritten by the fixed\n",
        "# MDP below; remove the two assignments below to experiment with a random MDP.\n",
        "# MDP defined in Section 5\n",
        "# r = [[r(a1, s1), r(a2, s1)], [r(a1, s2), r(a2, s2)]]\n",
        "r = np.array([-0.45, -0.1,  0.5,  0.5])\n",
        "\n",
        "# P = [\n",
        "# ..[P(s1| a1, s1), P(s2| a1, s1)],\n",
        "# ..[P(s1| a2, s1), P(s2| a2, s1)],\n",
        "# ..[P(s1| a1, s2), P(s2| a1, s2)],\n",
        "# ..[P(s1| a2, s2), P(s2| a2, s2)]\n",
        "#]\n",
        "P = np.array([[ 0.7,  0.3],\n",
        "       [ 0.99,  0.01],\n",
        "       [ 0.2,  0.8],\n",
        "       [ 0.99,  0.01]])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "bYnnUoHtj5d9"
      },
      "outputs": [],
      "source": [
        "#@title Value Functions\n",
        "# Evaluate many random stochastic policies; the scatter of their value\n",
        "# functions forms the green cloud reused as the background of every later plot.\n",
        "num_samples = 50000\n",
        "value_functions = []\n",
        "for _ in range(num_samples):\n",
        "  alphas = np.ones(num_actions)\n",
        "  # Row s of Pi holds pi(.|s) in the column block belonging to state s.\n",
        "  Pi = np.zeros((num_states, num_states*num_actions))\n",
        "  for s in range(num_states):\n",
        "    Pi[s, s*num_actions:(s+1)*num_actions] = np.random.dirichlet(alphas)\n",
        "\n",
        "  P_pi = np.matmul(Pi, P)\n",
        "  r_pi = np.matmul(Pi, r)\n",
        "\n",
        "  # Exact policy evaluation: V_pi = (I - gamma * P_pi)^-1 r_pi.\n",
        "  V_pi = np.matmul(np.linalg.inv((np.eye(num_states) - gamma*P_pi)), r_pi)\n",
        "  value_functions.append(V_pi)\n",
        "\n",
        "# Axis bounds (with margin eps), reused by all later plots via cfg_axes.\n",
        "xmin = min(V[0] for V in value_functions)\n",
        "xmax = max(V[0] for V in value_functions)\n",
        "ymin = min(V[1] for V in value_functions)\n",
        "ymax = max(V[1] for V in value_functions)\n",
        "eps = 0.2\n",
        "\n",
        "def cfg_axes(ax):\n",
        "  # Shared axes styling: hide top/right spines, fix limits, drop all ticks.\n",
        "  ax.spines['right'].set_visible(False)\n",
        "  ax.spines['top'].set_visible(False)\n",
        "  ax.set_xlim([xmin-eps, xmax+eps])\n",
        "  ax.set_ylim([ymin-eps, ymax+eps])\n",
        "  ax.tick_params(\n",
        "      axis='both',\n",
        "      which='both',\n",
        "      bottom=False,\n",
        "      top=False, \n",
        "      left=False,\n",
        "      right=False,\n",
        "      labelleft=False,\n",
        "      labelbottom=False)\n",
        "  \n",
        "plt.figure(figsize=(5,5))\n",
        "plt.scatter(*zip(*value_functions), c=color, edgecolors=color)\n",
        "ax = plt.gca()\n",
        "cfg_axes(ax)\n",
        "plt.show()\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "IjcTNZG4RaIh"
      },
      "outputs": [],
      "source": [
        "#@title Value Functions of Deterministic Policies\n",
        "# Build the one-hot action distributions (one per action).\n",
        "set_deter_actions = []\n",
        "for i in range(num_actions):\n",
        "  deter_action = np.zeros(num_actions)\n",
        "  deter_action[i] = 1\n",
        "  set_deter_actions.append(deter_action)\n",
        "\n",
        "# Enumerate all num_actions**num_states deterministic policies and evaluate\n",
        "# each one exactly; they are plotted in red over the sampled cloud.\n",
        "value_functions_deter = []\n",
        "for policies in itertools.product(set_deter_actions, repeat=num_states):\n",
        "  Pi = np.zeros((num_states, num_states*num_actions))\n",
        "  for s in range(num_states):\n",
        "    Pi[s, s*num_actions:(s+1)*num_actions] = policies[s]\n",
        "\n",
        "  P_pi = np.matmul(Pi, P)\n",
        "  r_pi = np.matmul(Pi, r)\n",
        "\n",
        "  # Exact policy evaluation: V_pi = (I - gamma * P_pi)^-1 r_pi.\n",
        "  V_pi = np.matmul(np.linalg.inv((np.eye(num_states) - gamma*P_pi)), r_pi)\n",
        "  value_functions_deter.append(V_pi)\n",
        "  \n",
        "plt.figure(figsize=(5,5))\n",
        "plt.scatter(*zip(*value_functions), color=color)\n",
        "plt.scatter(*zip(*value_functions_deter), color='red', s=100)\n",
        "ax = plt.gca()\n",
        "cfg_axes(ax)\n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "rXKCtCOQ6JrK"
      },
      "outputs": [],
      "source": [
        "#@title Value functions of semi-deterministic policies\n",
        "# A semi-deterministic policy fixes a deterministic action at one state and\n",
        "# leaves the other states free; enumerate every (state, action) pair to fix.\n",
        "params_sd_policies = []\n",
        "for s in range(num_states):\n",
        "  for a in range(num_actions):\n",
        "    p = np.zeros(num_actions)\n",
        "    p[a] = 1\n",
        "    params_sd_policies.append((s, p))\n",
        "\n",
        "value_functions_semi_deter = []\n",
        "for params in params_sd_policies:\n",
        "  state, state_policy = params\n",
        " \n",
        "  # Sample random policies, then overwrite the fixed state's row with the\n",
        "  # chosen deterministic action distribution.\n",
        "  for _ in range(1000):    \n",
        "    alphas = np.ones(num_actions)\n",
        "    Pi = np.zeros((num_states, num_states*num_actions))\n",
        "    for s in range(num_states):\n",
        "      Pi[s, s*num_actions:(s+1)*num_actions] = np.random.dirichlet(alphas)\n",
        "    Pi[state, state*num_actions:(state+1)*num_actions] = state_policy    \n",
        "\n",
        "    # Exact policy evaluation: V_pi = (I - gamma * P_pi)^-1 r_pi.\n",
        "    P_pi = np.matmul(Pi, P)\n",
        "    r_pi = np.matmul(Pi, r)\n",
        "    V_pi = np.matmul(np.linalg.inv((np.eye(num_states) - gamma*P_pi)), r_pi)\n",
        "    value_functions_semi_deter.append(V_pi)\n",
        "    \n",
        "\n",
        "plt.figure(figsize=(5,5))\n",
        "plt.scatter(*zip(*value_functions), color=color)\n",
        "plt.scatter(*zip(*value_functions_semi_deter), color='orange')\n",
        "ax = plt.gca()\n",
        "cfg_axes(ax)\n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "giZx898N2Jh2"
      },
      "outputs": [],
      "source": [
        "#@title The line theorem\n",
        "# Fix a random base policy, then vary the policy at one randomly chosen\n",
        "# state only; the value functions of the resulting policies are in orange.\n",
        "alphas = np.ones(num_actions)\n",
        "Pi = np.zeros((num_states, num_states*num_actions))\n",
        "for s in range(num_states):\n",
        "  Pi[s, s*num_actions:(s+1)*num_actions] = np.random.dirichlet(alphas)\n",
        "\n",
        "policies = []\n",
        "state = random.randint(0, num_states-1)\n",
        "for _ in range(1000):\n",
        "  # Copy the base policy and resample only the chosen state's row.\n",
        "  new_Pi = copy.deepcopy(Pi)\n",
        "  new_Pi[state, state*num_actions:(state+1)*num_actions] = np.random.dirichlet(alphas)\n",
        "  policies.append(new_Pi)\n",
        "\n",
        "# Exact policy evaluation of each perturbed policy.\n",
        "value_functions_lines = []\n",
        "for pi in policies:\n",
        "  P_pi = np.matmul(pi, P)\n",
        "  r_pi = np.matmul(pi, r)\n",
        "  V_pi = np.matmul(np.linalg.inv((np.eye(num_states) - gamma*P_pi)), r_pi)\n",
        "  value_functions_lines.append(V_pi)\n",
        "\n",
        "plt.figure(figsize=(5,5))\n",
        "plt.scatter(*zip(*value_functions), color=color)\n",
        "plt.scatter(*zip(*value_functions_lines), color='orange')\n",
        "ax = plt.gca()\n",
        "cfg_axes(ax)\n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "XJ-vBsaebiuK"
      },
      "outputs": [],
      "source": [
        "#@title Value functions of mixtures of 2 deterministic policies \n",
        "# generate deterministic policies\n",
        "pol = []\n",
        "for policies in itertools.product(set_deter_actions, repeat=num_states):\n",
        "  Pi = np.zeros((num_states, num_states*num_actions))\n",
        "  for s in range(num_states):\n",
        "    Pi[s, s*num_actions:(s+1)*num_actions] = policies[s]\n",
        "  pol.append(Pi)\n",
        "\n",
        "# pick two policies and their mixtures\n",
        "# A mixture interpolates the policy matrices: x*Pi_1 + (1-x)*Pi_2, x in [0, 1].\n",
        "policies = []\n",
        "Pi_1 = pol[0]\n",
        "Pi_2 = pol[3]\n",
        "for x in np.linspace(0, 1, 500):\n",
        "  new_Pi = x*Pi_1 + (1-x)*Pi_2\n",
        "  policies.append(new_Pi)\n",
        "\n",
        "# Exact policy evaluation of each mixture.\n",
        "value_functions_mixtures = []\n",
        "for pi in policies:\n",
        "  P_pi = np.matmul(pi, P)\n",
        "  r_pi = np.matmul(pi, r)\n",
        "  V_pi = np.matmul(np.linalg.inv((np.eye(num_states) - gamma*P_pi)), r_pi)\n",
        "  value_functions_mixtures.append(V_pi)\n",
        "\n",
        "plt.figure(figsize=(5,5))\n",
        "plt.scatter(*zip(*value_functions), color=color)\n",
        "plt.scatter(*zip(*value_functions_mixtures), color='orange')\n",
        "ax = plt.gca()\n",
        "cfg_axes(ax)\n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "d5l8WoTEfmET"
      },
      "outputs": [],
      "source": [
        "#@title Define starting points for learning dynamics\n",
        "\n",
        "def idx(s, a):\n",
        "  # Flat (state, action) index into r and the rows of P.\n",
        "  return s*num_actions + a\n",
        "\n",
        "def get_value_function_from_logits(thetas):\n",
        "  \"\"\"Evaluates the policy parameterized by one logit per state.\n",
        "\n",
        "  State s gets the two-action distribution\n",
        "  [sigmoid(thetas[s]), 1 - sigmoid(thetas[s])].\n",
        "  Returns the exact value function V_pi = (I - gamma*P_pi)^-1 r_pi.\n",
        "  \"\"\"\n",
        "  pis = [[1 / (1 + math.exp(-x)), math.exp(-x)/ (1 + math.exp(-x))] for x in thetas]\n",
        "  Pi = np.zeros((num_states, num_states*num_actions))\n",
        "  for s, pi in zip(range(num_states), pis):\n",
        "    Pi[s, s*num_actions:(s+1)*num_actions] = pi\n",
        "\n",
        "  P_pi = np.matmul(Pi, P)\n",
        "  r_pi = np.matmul(Pi, r)\n",
        "\n",
        "  V_pi = np.matmul(np.linalg.inv((np.eye(num_states) - gamma*P_pi)), r_pi)\n",
        "  return V_pi\n",
        "\n",
        "# Starting logits reused by the learning-dynamics cells below (VI, PI, PG, CEM).\n",
        "logits = [np.array([5., -5.]), np.array([-1., -5.]),  np.array([-1., 0.])]\n",
        "value_functions_starting_points = []\n",
        "for logit in logits:\n",
        "  V_pi = get_value_function_from_logits(logit)\n",
        "  value_functions_starting_points.append(V_pi)\n",
        "\n",
        "plt.figure(figsize=(5,5))\n",
        "plt.scatter(*zip(*value_functions), color=color)\n",
        "plt.scatter(*zip(*value_functions_starting_points), color='red', s=100)\n",
        "ax = plt.gca()\n",
        "cfg_axes(ax)\n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "qlhlyWzag-dg"
      },
      "outputs": [],
      "source": [
        "#@title Value Iteration\n",
        "# Run value iteration from each starting point; the trajectory is colored\n",
        "# by iteration number on top of the green value-function cloud.\n",
        "fig, ax = plt.subplots(nrows=1, ncols=len(logits), figsize=(15, 5))\n",
        "plt.subplots_adjust(wspace=0.1)\n",
        "num_iterations = 50\n",
        "\n",
        "for i, logit in enumerate(logits):\n",
        "  value_functions_vi = [get_value_function_from_logits(logit)]\n",
        "  for _ in range(num_iterations):\n",
        "    V = np.zeros(num_states) # placeholder\n",
        "    # Bellman optimality backup applied to the previous iterate.\n",
        "    for s in range(num_states):\n",
        "      V[s] = max(r[idx(s,a)] + gamma * np.dot(P[idx(s, a)], value_functions_vi[-1]) for a in range(num_actions))\n",
        "    value_functions_vi.append(V)\n",
        "\n",
        "  ax[i].scatter(*zip(*value_functions), color=color)\n",
        "\n",
        "  cfg_axes(ax[i])\n",
        "  ax[i].scatter(*zip(*value_functions_vi), c=np.arange(len(value_functions_vi)))\n",
        "    \n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "DvDY_jXItuSM"
      },
      "outputs": [],
      "source": [
        "#@title Policy iteration\n",
        "# Policy iteration: greedy improvement w.r.t. the current V_pi, followed by\n",
        "# exact policy evaluation, starting from each of the three logits.\n",
        "num_iterations = 2\n",
        "fig, ax = plt.subplots(nrows=1, ncols=len(logits), figsize=(15, 5))\n",
        "plt.subplots_adjust(wspace=0.1)\n",
        "\n",
        "for i, logit in enumerate(logits):\n",
        "  V_pi = get_value_function_from_logits(logit)\n",
        "  value_functions_pi = [V_pi]\n",
        "  for cnt in range(num_iterations):\n",
        "    Pi = np.zeros((num_states, num_states*num_actions))\n",
        "    for s in range(num_states):\n",
        "      # Greedy (one-step lookahead) action w.r.t. the current V_pi.\n",
        "      best_action = np.argmax([r[idx(s,a)] + gamma * np.dot(P[idx(s, a)], V_pi) for a in range(num_actions)]) \n",
        "      pi = np.zeros(num_actions)\n",
        "      pi[best_action] = 1\n",
        "      Pi[s, s*num_actions:(s+1)*num_actions] = pi\n",
        "\n",
        "    # Exact evaluation of the improved (deterministic) policy.\n",
        "    P_pi = np.matmul(Pi, P)\n",
        "    r_pi = np.matmul(Pi, r)\n",
        "    V_pi = np.matmul(np.linalg.inv((np.eye(num_states) - gamma*P_pi)), r_pi)\n",
        "    value_functions_pi.append(V_pi)\n",
        "\n",
        "  ax[i].scatter(*zip(*value_functions), color=color)\n",
        "  ax[i].scatter(*zip(*value_functions_pi), color='blue')\n",
        "  cfg_axes(ax[i])\n",
        "  ax[i].tick_params(axis='both', which='major', labelsize=16)\n",
        "\n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "8IdPh-pAec_W"
      },
      "outputs": [],
      "source": [
        "#@title Policy Gradient + Entropy Regularization + Natural Policy Gradient\n",
        "\n",
        "# Gradient ascent on the mean value across states (optionally entropy-\n",
        "# regularized), with optional natural-gradient conditioning by the inverse\n",
        "# Fisher information matrix.\n",
        "learning_rate = 1.\n",
        "num_iterations = 50\n",
        "entropy = False\n",
        "natural_gradients = True\n",
        "\n",
        "fig, ax = plt.subplots(nrows=1, ncols=len(logits), figsize=(15, 5))\n",
        "plt.subplots_adjust(wspace=0.1)\n",
        "# NOTE(review): Python 2 print statements (kernelspec is python2).\n",
        "print \"Entropy:\", entropy\n",
        "print \"Natural Gradients:\", natural_gradients\n",
        "\n",
        "for i, logit in enumerate(logits):\n",
        "  # One logit per state; pi(s) = [sigmoid(theta_s), 1 - sigmoid(theta_s)].\n",
        "  thetas = logit\n",
        "  thetas = [tf.Variable(theta, dtype=tf.float64) for theta in thetas]\n",
        "  pi = [[tf.nn.sigmoid(theta),1- tf.nn.sigmoid(theta)] for theta in thetas]\n",
        "  block_pi = block_diagonal([tf.expand_dims(p, 0) for p in pi])\n",
        "\n",
        "  # Exact policy evaluation in the graph: V_pi = (I - gamma*P_pi)^-1 r_pi.\n",
        "  P_pi = tf.matmul(block_pi, tf.constant(P))\n",
        "  r_pi = tf.matmul(block_pi, tf.expand_dims(tf.constant(r), 1))\n",
        "  V_pi = tf.matmul(tf.linalg.inv(tf.eye(num_states, dtype=tf.float64) - gamma*P_pi), r_pi)\n",
        "\n",
        "  # Objective: mean value across states, optionally entropy-regularized.\n",
        "  avg_return = tf.reduce_mean(V_pi)\n",
        "  if entropy:\n",
        "    for s in range(num_states):\n",
        "      avg_return -= 1.*tf.reduce_mean(tf.multiply(pi[s], tf.math.log(pi[s])))\n",
        "  \n",
        "  # Minimizing -avg_return performs gradient ascent on avg_return.\n",
        "  opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n",
        "  grads_and_vars = opt.compute_gradients(-avg_return, thetas)\n",
        "  \n",
        "  # replace None gradients by zero\n",
        "  log_pi_grad = []\n",
        "  for k in range(num_states):\n",
        "    gradients = [tf.gradients(tf.log(pi[k][j]), thetas) for j in range(num_actions)]\n",
        "    processed_gradients = []\n",
        "    for grad_list in gradients:\n",
        "      new_gradients = [grad if grad is not None else tf.constant(0., dtype=tf.float64) for grad in grad_list]\n",
        "      processed_gradients.append(tf.stack(new_gradients))\n",
        "    log_pi_grad.append(processed_gradients)\n",
        "\n",
        "  # define fisher information matrix\n",
        "  # NOTE(review): hard-coded 2x2 (one logit per state; assumes num_states == 2).\n",
        "  fisher = tf.zeros([2, 2], dtype=tf.float64)\n",
        "  for k, log_pi_state in enumerate(log_pi_grad): \n",
        "    for j, log_pi_state_action in enumerate(log_pi_state):\n",
        "      fisher += pi[k][j] * tf.matmul(log_pi_state_action[:, None], log_pi_state_action[None, :])\n",
        "  # Damped inverse (small diagonal term) for numerical stability.\n",
        "  fisher_inv = tf.linalg.inv(fisher + 0.0001*tf.eye(2, dtype=tf.float64))\n",
        "  \n",
        "  # condition gradients\n",
        "  grads = [gv[0] for gv in grads_and_vars]\n",
        "  grads_tensor = tf.stack(grads)\n",
        "  conditioned_grads = tf.matmul(fisher_inv, grads_tensor[:, None])\n",
        "  conditioned_grads = [conditioned_grads[k, 0] for k in range(num_states)]\n",
        "  conditioned_grads_and_vars = [(grad, old_grad_vars[1]) for grad, old_grad_vars in zip(conditioned_grads, grads_and_vars)]\n",
        "  \n",
        "  if natural_gradients:\n",
        "    train_op = opt.apply_gradients(conditioned_grads_and_vars)\n",
        "  else:\n",
        "    train_op = opt.apply_gradients(grads_and_vars)\n",
        "\n",
        "  value_functions_pg = []\n",
        "  # TF1 session; initialize_all_variables is the deprecated TF1 initializer.\n",
        "  with tf.Session() as sess:\n",
        "    tf.initialize_all_variables().run()\n",
        "    for _ in range(num_iterations):\n",
        "      value_functions_pg.append(sess.run(V_pi))\n",
        "      _, fish = sess.run([train_op, fisher])  \n",
        "      \n",
        "  ax[i].scatter(*zip(*value_functions), color=color)\n",
        "  ax[i].scatter(*zip(*value_functions_pg), c=np.arange(len(value_functions_pg)))\n",
        "  cfg_axes(ax[i])\n",
        "  ax[i].tick_params(axis='both', which='major', labelsize=16)\n",
        "\n",
        "plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {},
        "colab_type": "code",
        "id": "Vh10-rE4D8BZ"
      },
      "outputs": [],
      "source": [
        "#@title Cross Entropy Methods\n",
        "# Cross-entropy method: iteratively fit a Gaussian over policy logits to the\n",
        "# elite samples and track the value function of the successive means.\n",
        "num_samples = 500\n",
        "num_best_samples = 50\n",
        "initial_cov = .5*np.eye(2)\n",
        "num_iterations = 50\n",
        "noise = True\n",
        "# Report the actual flag (previously this printed the literal True regardless).\n",
        "print \"Add noise: \", noise\n",
        "\n",
        "def get_cem_traj(mean, cov, num_iterations, noise):\n",
        "  \"\"\"Runs CEM from (mean, cov) over logits.\n",
        "\n",
        "  Returns the list of value functions of the successive means. If `noise` is\n",
        "  set, diagonal jitter is added to the fitted covariance each iteration to\n",
        "  keep it from collapsing.\n",
        "  \"\"\"\n",
        "  value_functions = []\n",
        "  for cnt in range(num_iterations):\n",
        "    value_functions.append(get_value_function_from_logits(mean))\n",
        "    results = []\n",
        "    for lgts in np.random.multivariate_normal(mean, cov, num_samples):\n",
        "      results.append([lgts, np.mean(get_value_function_from_logits(lgts))])\n",
        "    # Sort once per iteration, best first (the sort previously sat inside the\n",
        "    # sampling loop, re-sorting after every append for the same final result).\n",
        "    results = sorted(results, key=lambda x: -x[1])\n",
        "    best_logits = np.array([lgts for lgts, _ in results[:num_best_samples]]) \n",
        "    mean = np.mean(best_logits, axis=0)\n",
        "    if noise:\n",
        "      cov = np.cov(best_logits.T) + 0.01*np.eye(2)\n",
        "    else:\n",
        "      cov = np.cov(best_logits.T)\n",
        "  return value_functions\n",
        "\n",
        "fig, ax = plt.subplots(nrows=1, ncols=len(logits), figsize=(15, 5))\n",
        "plt.subplots_adjust(wspace=0.1)\n",
        "for i, logit in enumerate(logits):\n",
        "  value_functions_cem = get_cem_traj(logit, initial_cov, num_iterations, noise)\n",
        "  ax[i].scatter(*zip(*value_functions), color=color)\n",
        "  ax[i].scatter(*zip(*value_functions_cem), c=np.arange(len(value_functions_cem)))\n",
        "  cfg_axes(ax[i])\n",
        "plt.show()"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "last_runtime": {
        "build_target": "",
        "kind": "local"
      },
      "name": "notebook_polytope_icml.ipynb",
      "provenance": [
        {
          "file_id": "1PmHx7K1XHcuQJ1FzVp2UAaYxii3D5F8h",
          "timestamp": 1548109859373
        },
        {
          "file_id": "1TR-Mfc-77rKwUysWeHbX5dqc4OFqsRRF",
          "timestamp": 1548089204132
        },
        {
          "file_id": "1BxTa49gbwwTUOQD29b8A8His68BhsI2d",
          "timestamp": 1547386266249
        },
        {
          "file_id": "1U3Z-x3oiVCznFiGPNKA3tmy8sMJUFbz9",
          "timestamp": 1546544000152
        },
        {
          "file_id": "1a80zUGvsq8aHk7f5fDhjABXSjH1d06hu",
          "timestamp": 1545297207732
        },
        {
          "file_id": "1IYnVjJL5am8ya0Y-GFMN0O6gTN0GhA97",
          "timestamp": 1545130048181
        },
        {
          "file_id": "1UcQ4gZL9LP9FYsNTk0h70XmeDsvl2dm7",
          "timestamp": 1545129339517
        }
      ],
      "version": "0.3.2"
    },
    "kernelspec": {
      "display_name": "Python 2",
      "name": "python2"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
