{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Mf3kOv1YMB5y"
      },
      "source": [
        "# $n \\hat R$ convergence\n",
        "\n",
        "This notebook is intended to present in a reproducible fashion numerical experiments used to evaluate the behavior of $n \\hat R$ across a range of models. Each section can be run independently, once the \"setup\" section has been run.\n",
        "\n",
        "Copyright 2021 Google LLC."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-rOdskBSMfQN"
      },
      "outputs": [],
      "source": [
        "#@title Licensed under the Apache License, Version 2.0 (the \"License\"); { display-mode: \"form\" }\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "# https://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7-We_g0ZpAa_"
      },
      "source": [
        "## Setup"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "XFYAUYmvxP6P"
      },
      "outputs": [],
      "source": [
        "import numpy as np\n",
        "from matplotlib.pyplot import *\n",
        "# %config InlineBackend.figure_format = 'retina'\n",
        "# matplotlib.pyplot.style.use(\"dark_background\")\n",
        "font = {'family' : 'normal',\n",
        "        'weight' : 'bold',\n",
        "        'size'   : 14}\n",
        "\n",
        "matplotlib.rc('font', **font)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "T5ZT9GpisTTw"
      },
      "outputs": [],
      "source": [
        "import jax\n",
        "from jax import random\n",
        "from jax import numpy as jnp\n",
        "\n",
        "from colabtools import adhoc_import\n",
        "\n",
        "# import tensforflow_datasets\n",
        "from inference_gym import using_jax as gym\n",
        "\n",
        "from tensorflow_probability.spinoffs.fun_mc import using_jax as fun_mcmc\n",
        "\n",
        "\n",
        "# import tensorflow as tf\n",
        "from tensorflow_probability.python.internal import prefer_static as ps\n",
        "from tensorflow_probability.python.internal import unnest\n",
        "\n",
        "\n",
        "import tensorflow_probability as _tfp\n",
        "tfp = _tfp.substrates.jax\n",
        "tfd = tfp.distributions\n",
        "tfb = tfp.bijectors\n",
        "\n",
        "tfp_np = _tfp.substrates.numpy\n",
        "tfd_np = tfp_np.distributions\n",
        "\n",
        "import arviz as az\n",
        "from tensorflow_probability.python.internal.unnest import get_innermost"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HfoX4rFyqYnB"
      },
      "source": [
        "# Theory"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ixjgpVHTpmZC"
      },
      "source": [
        "### Example A (Incomplete exploration)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-eP41AMCp3Ky"
      },
      "outputs": [],
      "source": [
        "a = np.array(range(4, 1024, 4))\n",
        "d = np.repeat(6., len(a))\n",
        "\n",
        "# Two optimization solutions, solving quadratic equations (+ / -)\n",
        "# Remark: + solution gives a negative upper-bound for delta_u\n",
        "alpha_1 = 2 * a + d / 2 - np.sqrt(np.square(2 * a + d / 2) - 2 * a)\n",
        "alpha_2 = a - alpha_1\n",
        "delta_u = (np.square(alpha_1 + d / 2) / (alpha_1 * alpha_2)) / 2\n",
        "\n",
        "eps = 0.01\n",
        "delta = np.square(1 + eps) - 1"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "NU9wiZcnqOEJ"
      },
      "outputs": [],
      "source": [
        "semilogy(a / d, delta_u)\n",
        "hlines(delta, (a / d)[0], (a / d)[len(a) - 1], linestyles = '--',\n",
        "      label =  \"delta for 1.01 threshold\")\n",
        "xlabel(\"a / d\")"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JvEzWs0apFBt"
      },
      "source": [
        "### Example B (Asymmetric binary initialization)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "DuFSsWrExZUr"
      },
      "outputs": [],
      "source": [
        "alpha_l = 0.5  # options: 0.1, 0.5, 1.2\n",
        "alpha_r = alpha_l\n",
        "\n",
        "p =  np.arange(0., 1., 0.01)\n",
        "\n",
        "sigma_pi = 1   # variance at stationarity\n",
        "\n",
        "# upper bound on initial variance\n",
        "stationary_bound = False\n",
        "theta_0_l = 3\n",
        "theta_0_r = 3 \n",
        "\n",
        "conservative_bound = False\n",
        "if conservative_bound:\n",
        "  sigma_0 = np.power((theta_0_l + theta_0_r), 2) / 4\n",
        "else:\n",
        "  delta_L = (alpha_l / theta_0_l) * np.square(theta_0_l - alpha_l) +\\\n",
        "  (theta_0_l - alpha_l) * np.square(alpha_l)\n",
        "  delta_R = (alpha_r / theta_0_r) * np.square(theta_0_r - alpha_r) +\\\n",
        "  (theta_0_r - alpha_r) * np.square(alpha_r)\n",
        "  sigma_0 = p * delta_L + (1 - p) * delta_R\n",
        "  # sigma_u = p * delta_L + (1 - p) * delta_R\n",
        "  sigma_0 = delta_L  # (for now, assume symmetry)\n",
        "\n",
        "if stationary_bound:\n",
        "  sigma_u = sigma_pi\n",
        "else:\n",
        "  sigma_u = 2 * max(sigma_pi, sigma_0)\n",
        "\n",
        "\n",
        "p = np.arange(0, 1.01, 0.01)     # prob of initializing on the left.\n",
        "\n",
        "# Set bias for chains coming from the left and right\n",
        "alpha_l = 1.2\n",
        "alpha_r = alpha_l\n",
        "\n",
        "delta = 0.02  # Relative tolerance for squared bias"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "RMsqXZcyc18Z"
      },
      "outputs": [],
      "source": [
        "var_mc = p * (1 - p) * np.square(alpha_l + alpha_r)\n",
        "bias_mc_squared = np.square(-p * alpha_l + (1 - p) * alpha_r)\n",
        "\n",
        "rel_var = var_mc / sigma_u\n",
        "rel_err = (var_mc + bias_mc_squared) /sigma_pi"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xVkg-J800gt1"
      },
      "outputs": [],
      "source": [
        "plot(p, var_mc + bias_mc_squared, label = \"squared error\")\n",
        "plot(p, var_mc, label = \"var_super\")\n",
        "legend(loc = \"best\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "KxaOMnRF1b1x"
      },
      "outputs": [],
      "source": [
        "plot(p, rel_err, label = \"rel_err\")\n",
        "plot(p, rel_var, label = \"rel_var\")\n",
        "hlines(delta, 0, 1, linestyles = \"--\", label = \"delta threshold\")\n",
        "legend(loc = \"best\")\n",
        "xlabel(\"p\")\n",
        "title(\"alpha = \" + str(alpha_l))"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "T-8fBo98qidx"
      },
      "source": [
        "# Application to models"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8_Vc-ULdnX8p"
      },
      "source": [
        "## Setup"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nrU9Kukfndqe"
      },
      "source": [
        "### Nested $\\hat R$"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "9gwuNLGV6pF2"
      },
      "outputs": [],
      "source": [
        "# Define nested Rhat for one parameter.\n",
        "# Assume for now the indexed parameter is a scalar.\n",
        "# TODO: deprecate state_is_list argument\n",
        "def nested_rhat_1dim(result_state, num_super_chains, index_param, \n",
        "                     num_samples, warmup_length = 0, state_is_list = False,\n",
        "                     vector_index = None):\n",
        "  if state_is_list:\n",
        "    if vector_index is not None:\n",
        "      state_param = result_state[index_param][\n",
        "                  warmup_length:(warmup_length + num_samples), :, vector_index]\n",
        "    else:\n",
        "      state_param = result_state[index_param][\n",
        "                        warmup_length:(warmup_length + num_samples), :]\n",
        "  else:\n",
        "    state_param = result_state[warmup_length:(warmup_length + num_samples),\n",
        "                               :, index_param]\n",
        "\n",
        "  num_samples = state_param.shape[0]\n",
        "  num_chains = state_param.shape[1]\n",
        "  num_sub_chains = num_chains // num_super_chains\n",
        "\n",
        "  state_param = state_param.reshape(num_samples, -1, num_sub_chains, 1)\n",
        "\n",
        "  mean_chain = np.mean(state_param, axis = (0, 3))\n",
        "  between_chain_var = np.var(mean_chain, axis = 1, ddof = 1)\n",
        "  within_chain_var = np.var(state_param, axis = (0, 3), ddof = 1)\n",
        "  total_chain_var = between_chain_var + np.mean(within_chain_var, axis = 1)\n",
        "\n",
        "  mean_super_chain = np.mean(state_param, axis = (0, 2, 3))\n",
        "  between_super_chain_var = np.var(mean_super_chain, ddof = 1)\n",
        "\n",
        "  return np.sqrt(1 + between_super_chain_var / np.mean(total_chain_var)),\\\n",
        "    between_super_chain_var, np.mean(total_chain_var)\n",
        "\n",
        "\n",
        "def nested_rhat(result_state, num_super_chains, index_param, \n",
        "                num_samples, warmup_length = 0, state_is_list = False):\n",
        "  nRhat = np.array([])\n",
        "  B = np.array([])\n",
        "  W = np.array([])\n",
        "  for i in range(0, index_param.shape[0]):\n",
        "    if state_is_list:\n",
        "      shape_state = result_state[index_param[i]].shape\n",
        "      if (len(shape_state) == 2):  # Listed parameter isn't a vector\n",
        "        nRhat_local, B_local, W_local = nested_rhat_1dim(result_state, \n",
        "                         num_super_chains, index_param[i], num_samples,\n",
        "                         warmup_length, state_is_list)\n",
        "\n",
        "        nRhat = np.append(nRhat, nRhat_local)\n",
        "        B = np.append(B, B_local)\n",
        "        W = np.append(W, W_local)\n",
        "\n",
        "      else:  # Listed parameter is a vector\n",
        "        for j in range(0, shape_state[2]):\n",
        "          nRhat_local, B_local, W_local = nested_rhat_1dim(result_state,\n",
        "                           num_super_chains, index_param[i], num_samples,\n",
        "                           warmup_length, state_is_list, \n",
        "                           vector_index = j)\n",
        "\n",
        "          nRhat = np.append(nRhat, nRhat_local)\n",
        "          B = np.append(B, B_local)\n",
        "          W = np.append(W, W_local)\n",
        "\n",
        "    else:  # Parameters are not stored as a list\n",
        "      nRhat_local, B_local, W_local = nested_rhat_1dim(result_state, \n",
        "                         num_super_chains, index_param[i], num_samples,\n",
        "                         warmup_length, state_is_list)\n",
        "\n",
        "      nRhat = np.append(nRhat, nRhat_local)\n",
        "      B = np.append(B, B_local)\n",
        "      W = np.append(W, W_local)\n",
        "\n",
        "  return nRhat, B, W\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "MF802-RmnhRw"
      },
      "source": [
        "### Run fits"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VV8tGkowKPHC"
      },
      "outputs": [],
      "source": [
        "def construct_kernel(target_log_prob_fn, init_step_size, num_warmup):\n",
        "  kernel = tfp.mcmc.HamiltonianMonteCarlo(target_log_prob_fn, init_step_size, 1)\n",
        "  kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(kernel, num_warmup)\n",
        "  kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n",
        "      kernel, num_warmup, target_accept_prob = 0.75,\n",
        "      reduce_fn = tfp.math.reduce_log_harmonic_mean_exp)\n",
        "  return kernel\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1YdkpOvbj7Jd"
      },
      "outputs": [],
      "source": [
        "def run_fits(num_seed, total_samples, initialize, kernel,\n",
        "             num_super_chains, index_param, num_samples, num_warmup,\n",
        "             state_is_list = False):\n",
        "  # TODO: deprecate state_is_list argument.\n",
        "  # if state_is_list:\n",
        "  #   initial_state = initialize((2,), key = jax.random.PRNGKey(1))\n",
        "  #   num_parameters = 0\n",
        "  #   for j in range(0, index_param.shape[0]):\n",
        "  #     if len(initial_state[j].shape) == 1: \n",
        "  #       num_parameters += 1\n",
        "  #     else:\n",
        "  #       num_parameters += initial_state[j].shape[1]\n",
        "  # else:\n",
        "  num_parameters = index_param.shape[0]\n",
        "\n",
        "  Rhat_list = np.zeros((num_seed, num_parameters))\n",
        "  nRhat_list = np.zeros((num_seed, num_parameters))\n",
        "  B_list = np.zeros((num_seed, num_parameters))\n",
        "  W_list = np.zeros((num_seed, num_parameters))\n",
        "  mc_mean_list = np.zeros((num_seed, num_parameters))\n",
        "\n",
        "  i = 0\n",
        "  for seed in jax.random.split(jax.random.PRNGKey(1), num_seed):\n",
        "    initial_state = initialize((num_super_chains,), key = seed + 1954)\n",
        "\n",
        "    if (state_is_list):\n",
        "      for j in range(0, len(initial_state)):\n",
        "        initial_state[j] = np.repeat(initial_state[j],\n",
        "                                     num_chains // num_super_chains, axis = 0)\n",
        "    else:\n",
        "      initial_state = np.repeat(initial_state, num_chains // num_super_chains,\n",
        "                                axis = 0)\n",
        "\n",
        "    result = tfp.mcmc.sample_chain(\n",
        "      total_samples, initial_state, kernel = kernel,\n",
        "      seed = seed)\n",
        "\n",
        "    # if (state_is_list):\n",
        "      # result_samples = result.all_states\n",
        "      # print(\"Shape:\", result_samples[0].shape)\n",
        "      # for j in range(0, index_param.shape[0]):\n",
        "      #   result_samples[j] = result_samples[j][num_warmup:]\n",
        "      \n",
        "      # Rhat_local_list = tfp.mcmc.potential_scale_reduction(result_samples)\n",
        "\n",
        "      # param_index = 0\n",
        "      # for j in range(0, index_param.shape[0]):\n",
        "      #   if ()\n",
        "      #   Rhat_list[i, param_index:]\n",
        "\n",
        "      # Rhat_list[i, :] = tfp.mcmc.potential_scale_reduction(result_samples)\n",
        "    # else:\n",
        "    Rhat_list[i, :] = tfp.mcmc.potential_scale_reduction(\n",
        "                result.all_states[num_warmup:(num_warmup + num_samples), :,\n",
        "                                  index_param])\n",
        "\n",
        "    # print(result.all_states[0].shape)\n",
        "\n",
        "    # if state_is_list:\n",
        "    #   for j in range(0, index_param.shape[0]):\n",
        "\n",
        "    #     print(tfp.mcmc.potential_scale_reduction(\n",
        "    #         result.all_states[j][num_warmup:(num_warmup + num_samples)]))\n",
        "\n",
        "    #     Rhat_list[i, j] = tfp.mcmc.potential_scale_reduction(\n",
        "    #         result.all_states[j][num_warmup:(num_warmup + num_samples)]\n",
        "    #     )\n",
        "    # else:\n",
        "    #   Rhat_list[i, :] = tfp.mcmc.potential_scale_reduction(\n",
        "    #     result.all_states[num_warmup:(num_warmup + num_samples), :,\n",
        "    #                       index_param])\n",
        "\n",
        "\n",
        "    nRhat_local, B_local, W_local = nested_rhat(result.all_states,\n",
        "                                   num_super_chains = num_super_chains,\n",
        "                                   index_param = index_param,\n",
        "                                   num_samples = num_samples,\n",
        "                                   warmup_length = num_warmup,\n",
        "                                   state_is_list = state_is_list)\n",
        "\n",
        "    nRhat_list[i, :] = nRhat_local\n",
        "    B_list[i, :] = B_local\n",
        "    W_list[i, :] = W_local\n",
        "\n",
        "    mc_mean_list[i, :] = np.mean(result.all_states[num_warmup + 1, :,\n",
        "                                                   index_param],\n",
        "                        axis = 1)\n",
        "    i += 1\n",
        "\n",
        "  return Rhat_list, nRhat_list, B_list, W_list, mc_mean_list\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "emDXXkPxnj5i"
      },
      "source": [
        "### Adaptive warmup (forge chain)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "v06qapg-UIlU"
      },
      "outputs": [],
      "source": [
        "def forge_chain (kernel_cold, kernel_warm, initial_state, num_super_chains,\n",
        "                 num_warmup, num_samples,\n",
        "                 target_rhat, max_num_steps, index_param, seed,\n",
        "                 num_nRhat_comp = 1,\n",
        "                 state_is_list = False):\n",
        "  warmup_is_acceptable = False\n",
        "  window_iteration = 0\n",
        "  current_state = initial_state\n",
        "  kernel_args = None\n",
        "\n",
        "  while (not warmup_is_acceptable and window_iteration \u003c max_num_steps):\n",
        "    window_iteration += 1\n",
        "\n",
        "    # 1) Run MCMC on warmup window.\n",
        "    result_cold, trace, kernel_args = tfp.mcmc.sample_chain(\n",
        "        num_results = num_warmup,\n",
        "        current_state = current_state,\n",
        "        kernel = kernel_cold,\n",
        "        previous_kernel_results = kernel_args,\n",
        "        trace_fn = lambda _, pkr: unnest.get_innermost(pkr, 'step_size'),\n",
        "        return_final_kernel_results = True,\n",
        "        seed = seed + window_iteration)  # Update seed during while loop\n",
        "\n",
        "    current_state = result_cold[-1]\n",
        "\n",
        "    # 2) Generate candidate samples.\n",
        "    result_warm, trace = tfp.mcmc.sample_chain(\n",
        "        num_results = num_samples * num_nRhat_comp,\n",
        "        current_state = current_state,\n",
        "        kernel = kernel_warm,\n",
        "        trace_fn = lambda _, pkr: unnest.get_innermost(pkr, 'step_size'),\n",
        "        previous_kernel_results = kernel_args,\n",
        "        seed = seed + 999999)\n",
        "\n",
        "    # 3) Check if candidate samples are acceptable.\n",
        "    nRhat = np.zeros((index_param.shape[0], num_nRhat_comp))\n",
        "    for i in range(0, num_nRhat_comp):\n",
        "      nRhat[:, i], _B, _W = nested_rhat(result_warm[i:((i + 1) * num_samples)],\n",
        "                                  num_super_chains = num_super_chains,\n",
        "                                  index_param = index_param,\n",
        "                                  num_samples = num_samples,\n",
        "                                  state_is_list = state_is_list)\n",
        "    \n",
        "    nRhat_max = max(np.mean(nRhat, axis = 1))\n",
        "    print(nRhat_max)\n",
        "\n",
        "    if (nRhat_max \u003c target_rhat): warmup_is_acceptable = True\n",
        "    # (WHILE loop ends)\n",
        "\n",
        "  return result_warm, window_iteration\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "0GDAgXbZnkfv"
      },
      "outputs": [],
      "source": [
        "def run_forge_chain (num_seed, kernel_cold, kernel_warm, initialize,\n",
        "                     num_super_chains, num_warmup, num_samples,\n",
        "                     target_rhat, max_num_steps, index_param,\n",
        "                     num_nRhat_comp = 1,\n",
        "                     state_is_list = False):\n",
        "  mc_mean_list = np.zeros((num_seed, index_param.shape[0]))\n",
        "  warmup_length = np.zeros(num_seed)\n",
        "\n",
        "  i = 0\n",
        "  for seed in jax.random.split(jax.random.PRNGKey(1), num_seed):\n",
        "    print(\"NEW SEED\")\n",
        "    initial_state = initialize((num_super_chains,), key = seed + 1954)\n",
        "    initial_state = np.repeat(initial_state, num_chains // num_super_chains,\n",
        "                              axis = 0)\n",
        "\n",
        "    result, window_iteration = forge_chain(kernel_cold, kernel_warm,\n",
        "                                           initial_state, num_super_chains,\n",
        "                                           num_warmup, num_samples,\n",
        "                                           target_rhat, max_num_steps,\n",
        "                                           index_param, seed,\n",
        "                                           num_nRhat_comp, state_is_list)\n",
        "\n",
        "    warmup_length[i] = window_iteration * num_warmup\n",
        "    mc_mean_list[i, :] = np.mean(result[0, :, index_param],\n",
        "                        axis = 1)\n",
        "    \n",
        "    i += 1\n",
        "\n",
        "  return mc_mean_list, warmup_length"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zETqxeAiCfQp"
      },
      "source": [
        "For this experiment, we compute $n \\hat R$ using `n_samples = 5`, to stabilize estimators. To check if the chain is properly warmed up, we only examine the first sample."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "GIriLg9T1-3d"
      },
      "outputs": [],
      "source": [
        "num_chains = 128\n",
        "num_super_chains = 4\n",
        "num_samples = 5\n",
        "num_seed = 30"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "pGYvmveiK107"
      },
      "source": [
        "If the chains converged (i.e. they behave as though indepdent from one another), then we expect the effective sample size \"reported\" by $n \\hat R$ to be lower-bounded by the number of chains in each super-chain. This gives us an upper-bound for $n \\hat R$:\n",
        "\n",
        "\\begin{eqnarray*}\n",
        "  u_{n \\hat R}  = \\sqrt{1 + \\frac{1}{M}}.\n",
        "\\end{eqnarray*}"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "BFpTeuuCJtyr"
      },
      "outputs": [],
      "source": [
        "# Compute a lower bound on nRhat if the chains behave as though independent.\n",
        "nRhat_upper = np.sqrt(1 + 1 / (num_chains / num_super_chains))\n",
        "print(\"Convergence upper bound for nRhat:\", nRhat_upper) "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "pUtFLQfuTXFQ"
      },
      "source": [
        "The next line of code is for the adaptive warmup scheme."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9I1T1DEZsPrx"
      },
      "source": [
        "## Banana"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "sF6iLDf_qkKO"
      },
      "outputs": [],
      "source": [
        "target = gym.targets.VectorModel(gym.targets.Banana(),\n",
        "                                  flatten_sample_transformations=True)\n",
        "num_dimensions = target.event_shape[0]  \n",
        "init_step_size = 1.\n",
        "\n",
        "def target_log_prob_fn(x):\n",
        "  \"\"\"Unnormalized, unconstrained target density.\n",
        "  \n",
        "  This is a thin wrapper that applies the default bijectors so that we can\n",
        "  ignore any constraints.\n",
        "  \"\"\"\n",
        "  y = target.default_event_space_bijector(x)\n",
        "  fldj = target.default_event_space_bijector.forward_log_det_jacobian(x)\n",
        "  return target.unnormalized_log_prob(y) + fldj\n",
        "\n",
        "# NOTE: Avoid initials centered around the true mean. \n",
        "offset = 2\n",
        "def initialize (shape, key = random.PRNGKey(37272709)):\n",
        "  return 10 * random.normal(key, shape + (num_dimensions,)) + offset"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "IjCJXla2377l"
      },
      "outputs": [],
      "source": [
        "# Get some estimates of the mean and variance.\n",
        "try:\n",
        "  mean_est = target.sample_transformations['identity'].ground_truth_mean\n",
        "except:\n",
        "  print('no ground truth mean')\n",
        "  mean_est = (result.all_states[num_warmup:, :]).mean(0).mean(0)\n",
        "try:\n",
        "  var_est = target.sample_transformations['identity'].ground_truth_standard_deviation**2\n",
        "except:\n",
        "  print('no ground truth std dev')\n",
        "  var_est = ((result.all_states[num_warmup:, :]**2).mean(0).mean(0) -\n",
        "             mean_est**2)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ifWCkjip2SWE"
      },
      "outputs": [],
      "source": [
        "# Define MCMC kernel\n",
        "num_warmup, num_sampling = 10, 10\n",
        "total_samples = num_warmup + num_sampling\n",
        "\n",
        "kernel = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                          init_step_size = init_step_size,\n",
        "                          num_warmup = num_warmup)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "jxwEAZNMGG8S"
      },
      "outputs": [],
      "source": [
        "index_param = np.array([0, 1])"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "oA0oQ8NeTCtx"
      },
      "source": [
        "### $n \\hat R$ diagnostic"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "f6Ng8lGuGrCA"
      },
      "outputs": [],
      "source": [
        "Rhat_list, nRhat_list, B_list, W_list, mc_mean_list = run_fits(\n",
        "           num_seed = num_seed, total_samples = total_samples,\n",
        "           initialize = initialize, kernel = kernel,\n",
        "           num_super_chains = num_super_chains, index_param = index_param,\n",
        "           num_samples = num_samples, num_warmup = num_warmup)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "7vrTdelWEtow"
      },
      "outputs": [],
      "source": [
        "square_error = np.square(mc_mean_list - mean_est[index_param])\n",
        "expected_error = var_est[index_param] / num_chains\n",
        "\n",
        "fig = figure(figsize =(6, index_param.shape[0]))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "plot_data = [square_error[:, 0] / expected_error[0], \n",
        "             square_error[:, 1] / expected_error[1]]\n",
        "ax.boxplot(plot_data)\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "title(\"Squared Error over expected squared error (if chains converged)\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VPHzf4THNGBp"
      },
      "outputs": [],
      "source": [
        "fig = figure(figsize =(6, 6))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "plot_data = [nRhat_list[:, 0], Rhat_list[:, 0],\n",
        "             nRhat_list[:, 1], Rhat_list[:, 1]]\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = 1.05, color = 'r', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1.05\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "ax.boxplot(plot_data, vert = 0)\n",
        "ax.set_yticklabels(['nRhat[0]', 'Rhat[0]',\n",
        "                    'nRhat[1]', 'Rhat[1]'])\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "IoRw3A1JSW2V"
      },
      "outputs": [],
      "source": [
        "# Let's take a closer look at the nRhat's and compare them\n",
        "# to what we would expect from independent samples.\n",
        "fig = figure(figsize = (6, 2))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "plot_data = [nRhat_list[:, 0], nRhat_list[:, 1]]\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "ax.boxplot(plot_data, vert = 0)\n",
        "ax.set_yticklabels(['nRhat[0]', 'nRhat[1]'])\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "k9SVmyNVTLx5"
      },
      "source": [
        "### Adaptive warmup length"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ZlhJWY0sWyg-"
      },
      "source": [
        "Remark: using a short window gives the algorithm more opportunities to stop, which means start trying our luck. A reasonable compromise seems to use a window of size 100."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "_i0RzC3IULbT"
      },
      "outputs": [],
      "source": [
        "warmup_window = 100\n",
        "\n",
        "kernel_cold = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                               init_step_size = init_step_size,\n",
        "                               num_warmup = warmup_window)\n",
        "\n",
        "kernel_warm = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                               init_step_size = init_step_size,\n",
        "                               num_warmup = 0)\n",
        "\n",
        "num_samples = 3"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "CUNRCGJLTNak"
      },
      "outputs": [],
      "source": [
        "mc_mean_list, warmup_length = run_forge_chain(num_seed = num_seed,\n",
        "                                          kernel_cold = kernel_cold,\n",
        "                                          kernel_warm = kernel_warm,\n",
        "                                          initialize = initialize,\n",
        "                                          num_super_chains = num_super_chains,\n",
        "                                          num_warmup = warmup_window,\n",
        "                                          num_samples = num_samples,\n",
        "                                          target_rhat = nRhat_upper,\n",
        "                                          max_num_steps = 1000 // warmup_window,\n",
        "                                          index_param = index_param,\n",
        "                                          num_nRhat_comp = 3)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "vwWOQOPdU7SU"
      },
      "outputs": [],
      "source": [
        "square_error = np.square(mc_mean_list - mean_est[index_param])\n",
        "expected_error = var_est[index_param] / num_chains\n",
        "\n",
        "plot_data = [square_error[:, 0] / expected_error[0]]\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(square_error[:, i] / expected_error[i])\n",
        "\n",
        "fig = figure(figsize =(6, 3))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "ax.boxplot(plot_data)\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "title(\"Squared Error over expected squared error (if chains converged)\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "eIxlgpwOVCeF"
      },
      "outputs": [],
      "source": [
        "scatter(warmup_length, square_error[:, 0] / expected_error[0])\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "krLvJlzfP8Hl"
      },
      "source": [
        "## German credit score"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "e9kNu5epQHri"
      },
      "outputs": [],
      "source": [
        "target = gym.targets.VectorModel(gym.targets.GermanCreditNumericLogisticRegression(),\n",
        "                                  flatten_sample_transformations=True)\n",
        "num_dimensions = target.event_shape[0]  \n",
        "init_step_size = 0.02\n",
        "\n",
        "def target_log_prob_fn(x):\n",
        "  \"\"\"Unnormalized, unconstrained target density.\n",
        "  \n",
        "  This is a thin wrapper that applies the default bijectors so that we can\n",
        "  ignore any constraints.\n",
        "  \"\"\"\n",
        "  y = target.default_event_space_bijector(x)\n",
        "  fldj = target.default_event_space_bijector.forward_log_det_jacobian(x)\n",
        "  return target.unnormalized_log_prob(y) + fldj\n",
        "\n",
        "offset = 0.1\n",
        "def initialize (shape, key = random.PRNGKey(37272709)):\n",
        "  return random.normal(key, shape + (num_dimensions,)) + offset"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "gEVD0FZ_Q0Si"
      },
      "outputs": [],
      "source": [
        "# Get some estimates of the mean and variance.\n",
        "try:\n",
        "  mean_est = target.sample_transformations['identity'].ground_truth_mean\n",
        "except:\n",
        "  print('no ground truth mean')\n",
        "  mean_est = (result.all_states[num_warmup:, :]).mean(0).mean(0)\n",
        "try:\n",
        "  var_est = target.sample_transformations['identity'].ground_truth_standard_deviation**2\n",
        "except:\n",
        "  print('no ground truth std dev')\n",
        "  var_est = ((result.all_states[num_warmup:, :]**2).mean(0).mean(0) -\n",
        "             mean_est**2)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "iyQ6NzQ_Q4FP"
      },
      "outputs": [],
      "source": [
        "num_seed = 10\n",
        "num_samples = 5\n",
        "num_warmup, num_sampling = 500, 10\n",
        "total_samples = num_warmup + num_sampling\n",
        "\n",
        "kernel = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                          init_step_size = init_step_size,\n",
        "                          num_warmup = num_warmup)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "y4RM62ZfRHSJ"
      },
      "outputs": [],
      "source": [
        "index_param = np.arange(0, 25)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "XU1NsI3FaALi"
      },
      "source": [
        "### $n \\hat R$ diagnostic"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Zjq6mAoyQ_sY"
      },
      "outputs": [],
      "source": [
        "Rhat_list, nRhat_list, B_list, W_list, mc_mean_list = run_fits(\n",
        "           num_seed = num_seed, total_samples = total_samples,\n",
        "           initialize = initialize, kernel = kernel,\n",
        "           num_super_chains = num_super_chains, index_param = index_param,\n",
        "           num_samples = num_samples, num_warmup = num_warmup)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "uI4pohRF-u5v"
      },
      "outputs": [],
      "source": [
        "# OPTIONAL: correct Rhat to avoid having negative values.\n",
        "if True:\n",
        "  Rhat_list = np.sqrt(np.square(Rhat_list) - (num_samples - 1) / num_samples + 1)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "smuUQgL2HbRE"
      },
      "outputs": [],
      "source": [
        "square_error = np.square(mc_mean_list - mean_est[index_param])\n",
        "expected_error = var_est[index_param] / num_chains\n",
        "\n",
        "plot_data = [square_error[:, 0] / expected_error[0]]\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(square_error[:, i] / expected_error[i])\n",
        "\n",
        "fig = figure(figsize =(6, 3))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "ax.boxplot(plot_data)\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "title(\"Squared Error over expected squared error (if chains converged)\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "FRBvR93dTQZH"
      },
      "outputs": [],
      "source": [
        "plot_data = [nRhat_list[:, 0], Rhat_list[:, 0]]\n",
        "ylabels = ['nRhat[0]', 'Rhat[0]']\n",
        "plot_data_nRhat = [nRhat_list[:, 0]]\n",
        "ylabels_nRhat = ['nRhat[0]']\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(nRhat_list[:, i])\n",
        "  plot_data.append(Rhat_list[:, i])\n",
        "  ylabels.append(('nRhat[' + str(i) + ']'))\n",
        "  ylabels.append(('Rhat[' + str(i) + ']'))\n",
        "\n",
        "  plot_data_nRhat.append(nRhat_list[:, i])\n",
        "  ylabels_nRhat.append(('nRhat[' + str(i) + ']'))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "MtiAH86iTE5M"
      },
      "outputs": [],
      "source": [
        "fig = figure(figsize =(6, 0.5 * index_param.shape[0]))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = 1.05, color = 'r', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1.05\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "ax.boxplot(plot_data, vert = 0)\n",
        "ax.set_yticklabels(ylabels)\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "NI4dRjDrVrrC"
      },
      "outputs": [],
      "source": [
        "# Let's take a closer look at the nRhat's and compare them\n",
        "# to what we would expect from independent samples.\n",
        "fig = figure(figsize =(6, 0.5 * index_param.shape[0]))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "ax.boxplot(plot_data_nRhat, vert = 0)\n",
        "ax.set_yticklabels(ylabels_nRhat)\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "tu2cWSfObhBU"
      },
      "source": [
        "### Adaptive warmup length"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "u5h5FFoFbgG0"
      },
      "outputs": [],
      "source": [
        "warmup_window = 100\n",
        "max_warmup = 1000\n",
        "\n",
        "kernel_cold = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                               init_step_size = init_step_size,\n",
        "                               num_warmup = max_warmup)\n",
        "\n",
        "kernel_warm = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                               init_step_size = init_step_size,\n",
        "                               num_warmup = 0)\n",
        "\n",
        "num_samples = 5"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "dsAI4I7Dbt9O"
      },
      "outputs": [],
      "source": [
        "mc_mean_list, warmup_length = run_forge_chain(num_seed = num_seed,\n",
        "                                  kernel_cold = kernel_cold,\n",
        "                                  kernel_warm = kernel_warm,\n",
        "                                  initialize = initialize,\n",
        "                                  num_super_chains = num_super_chains,\n",
        "                                  num_warmup = warmup_window,\n",
        "                                  num_samples = num_samples,\n",
        "                                  target_rhat = nRhat_upper,\n",
        "                                  max_num_steps = max_warmup // warmup_window,\n",
        "                                  index_param = index_param,\n",
        "                                  num_nRhat_comp = 1)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1miEClX3bzQs"
      },
      "outputs": [],
      "source": [
        "square_error = np.square(mc_mean_list - mean_est[index_param])\n",
        "expected_error = var_est[index_param] / num_chains\n",
        "\n",
        "plot_data = [square_error[:, 0] / expected_error[0]]\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(square_error[:, i] / expected_error[i])\n",
        "\n",
        "fig = figure(figsize =(6, 3))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "ax.boxplot(plot_data)\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "title(\"Squared Error over expected squared error (if chains converged)\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Df-cfRHgbz-g"
      },
      "outputs": [],
      "source": [
        "scatter(warmup_length, square_error[:, 0] / expected_error[0])\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "qh5LlUBckB2m"
      },
      "source": [
        "### Draft"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "vZzpm4XukRCp"
      },
      "outputs": [],
      "source": [
        "seed = jax.random.PRNGKey(1954)\n",
        "initial_state = initialize((num_super_chains,), key = seed + 1954)\n",
        "initial_state = np.repeat(initial_state, num_chains // num_super_chains,\n",
        "                          axis = 0)\n",
        "current_state = initial_state\n",
        "kernel_args = None"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "JAfWSweHkXPs"
      },
      "outputs": [],
      "source": [
        "num_warmup = 250"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "bjmkWNgSj3Wr"
      },
      "outputs": [],
      "source": [
        "result_cold, trace0, kernel_args = tfp.mcmc.sample_chain(\n",
        "        num_results = num_warmup,\n",
        "        current_state = current_state,\n",
        "        kernel = kernel_cold,\n",
        "        previous_kernel_results = kernel_args,\n",
        "        trace_fn = lambda _, pkr: unnest.get_innermost(pkr, 'step_size'),\n",
        "        return_final_kernel_results = True,\n",
        "        seed = seed)  # Update seed during while loop\n",
        "\n",
        "current_state = result_cold[-1]\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "CbaKvus3lIsW"
      },
      "outputs": [],
      "source": [
        "plot(trace0)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "qNCbfq19k15d"
      },
      "outputs": [],
      "source": [
        "result_cold, trace1, kernel_args = tfp.mcmc.sample_chain(\n",
        "        num_results = num_warmup,\n",
        "        current_state = current_state,\n",
        "        kernel = kernel_cold,\n",
        "        previous_kernel_results = kernel_args,\n",
        "        trace_fn = lambda _, pkr: unnest.get_innermost(pkr, 'step_size'),\n",
        "        return_final_kernel_results = True,\n",
        "        seed = seed)  # Update seed during while loop\n",
        "\n",
        "current_state = result_cold[-1]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "6IffqXt0lsOv"
      },
      "outputs": [],
      "source": [
        "plot(trace1)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "eQDLGPvRmljZ"
      },
      "outputs": [],
      "source": [
        "result_warm, trace = tfp.mcmc.sample_chain(\n",
        "        num_results = num_samples,\n",
        "        current_state = current_state,\n",
        "        kernel = kernel_warm,\n",
        "        trace_fn = lambda _, pkr: unnest.get_innermost(pkr, 'step_size'),\n",
        "        previous_kernel_results = kernel_args,\n",
        "        seed = seed + 999999)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "hLJxHI6Qm5hi"
      },
      "outputs": [],
      "source": [
        "nRhat, _B, _W = nested_rhat(result_warm, num_super_chains, index_param, num_samples)\n",
        "nRhat"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_wsj073_mNtj"
      },
      "source": [
        "## Eight Schools"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "qsSyqlQGmPTY"
      },
      "outputs": [],
      "source": [
        "# NOTE: inference gym stores the centered parameterization\n",
        "target_raw = gym.targets.EightSchools()  # store raw to examine doc.\n",
        "target = gym.targets.VectorModel(target_raw,\n",
        "                                  flatten_sample_transformations = True)\n",
        "num_dimensions = target.event_shape[0]\n",
        "init_step_size = 1\n",
        "\n",
        "# Using underdispersed inits can showcase problems with our diagnostics.\n",
        "# underdispersed = False\n",
        "# Options: underdispersed, overdispersed, prior\n",
        "init_type = \"prior\"\n",
        "if init_type == \"underdispersed\":\n",
        "  offset = 0.0\n",
        "  def initialize (shape, key = random.PRNGKey(37272709)):\n",
        "    return 1 * random.normal(key, shape + (num_dimensions,)) + offset\n",
        "elif init_type == \"overdispersed\":\n",
        "  offset = 0.0\n",
        "  def initialize (shape, key = random.PRNGKey(37272709)):\n",
        "    return 100 * random.normal(key, shape + (num_dimensions,)) + offset\n",
        "elif init_type == \"prior\":\n",
        "  def initialize (shape, key = random.PRNGKey(37272709)):\n",
        "    prior_scale = jnp.append(jnp.array([10., 1.]), jnp.repeat(1., 8))\n",
        "    prior_offset = jnp.append(jnp.array([0., 5.]), jnp.repeat(0., 8))\n",
        "    return prior_scale * random.normal(key, shape + (num_dimensions,)) + prior_offset\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "CaRUT8pRAoX6"
      },
      "outputs": [],
      "source": [
        "num_schools = 8\n",
        "y = np.array([28, 8, -3, 7, -1, 1, 18, 12], dtype = np.float32)\n",
        "sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype = np.float32)\n",
        "\n",
        "# NOTE: the reinterpreted batch dimension specifies the dimension of\n",
        "# each independent variable, here the school.\n",
        "model = tfd.JointDistributionSequential([\n",
        "    tfd.Normal(loc = 0., scale = 10., name = \"mu\"),\n",
        "    tfd.Normal(loc = 5., scale = 1., name = \"log_tau\"),\n",
        "    tfd.Independent(tfd.Normal(loc = jnp.zeros(num_schools),\n",
        "                               scale = jnp.ones(num_schools),\n",
        "                               name = \"eta\"),\n",
        "                    reinterpreted_batch_ndims = 1),\n",
        "    lambda eta, log_tau, mu: (\n",
        "        tfd.Independent(tfd.Normal(loc = (mu[..., jnp.newaxis] +\n",
        "                                        jnp.exp(log_tau[..., jnp.newaxis]) *\n",
        "                                        eta),\n",
        "                                   scale = sigma),\n",
        "                        name = \"y\",\n",
        "                        reinterpreted_batch_ndims = 1))\n",
        "  ])\n",
        "\n",
        "def target_log_prob_fn(x):\n",
        "  mu = x[:, 0]\n",
        "  log_tau = x[:, 1]\n",
        "  eta = x[:, 2:10]\n",
        "  return model.log_prob((mu, log_tau, eta, y))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VvLs2N_Rd86j"
      },
      "outputs": [],
      "source": [
        "# Use results from running 128 chains with 1000 + 5000 iterations each,\n",
        "# for non-centered parameterization.\n",
        "mean_est = np.array([5.8006573 ,  2.4502006 ,  0.6532423 ,  0.09639207,\n",
        "             -0.23725411,  0.04723661, -0.33556408, -0.19666635,\n",
        "              0.5390533 ,  0.14633301])\n",
        "\n",
        "var_est = np.array([29.60382   ,  0.26338503,  0.6383733 ,  0.4928926 ,\n",
        "              0.65307987,  0.52441144,  0.46658015,  0.5248887 ,\n",
        "              0.49544162,  0.690975])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Gnpmi-8HBeud"
      },
      "outputs": [],
      "source": [
        "index_param = np.arange(0, 10)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FbflQyOJapRj"
      },
      "source": [
        "### $n \\hat R$ diagnostic"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "hkgw444TBSK0"
      },
      "outputs": [],
      "source": [
        "num_seed = 10\n",
        "num_samples = 5\n",
        "num_warmup, num_sampling = 5, 10\n",
        "total_samples = num_warmup + num_sampling\n",
        "\n",
        "kernel = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                          init_step_size = init_step_size,\n",
        "                          num_warmup = num_warmup)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1rSlrbP_Bnmt"
      },
      "outputs": [],
      "source": [
        "Rhat_list, nRhat_list, B_list, W_list, mc_mean_list = run_fits(\n",
        "           num_seed = num_seed, total_samples = total_samples,\n",
        "           initialize = initialize, kernel = kernel,\n",
        "           num_super_chains = num_super_chains, index_param = index_param,\n",
        "           num_samples = num_samples, num_warmup = num_warmup)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xU7lQ7dzBuvN"
      },
      "outputs": [],
      "source": [
        "# OPTIONAL: correct Rhat to avoid having negative values.\n",
        "if True:\n",
        "  Rhat_list = np.sqrt(np.square(Rhat_list) - (num_samples - 1) / num_samples + 1)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-QAHzEnHfWTh"
      },
      "outputs": [],
      "source": [
        "square_error = np.square(mc_mean_list - mean_est[index_param])\n",
        "expected_error = var_est[index_param] / num_chains\n",
        "\n",
        "plot_data = [square_error[:, 0] / expected_error[0]]\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(square_error[:, i] / expected_error[i])\n",
        "\n",
        "fig = figure(figsize =(6, 3))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "ax.boxplot(plot_data)\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "title(\"Squared Error over expected squared error (if chains converged)\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "uN8FgMfvBv9d"
      },
      "outputs": [],
      "source": [
        "plot_data = [nRhat_list[:, 0], Rhat_list[:, 0]]\n",
        "ylabels = ['nRhat[0]', 'Rhat[0]']\n",
        "plot_data_nRhat = [nRhat_list[:, 0]]\n",
        "ylabels_nRhat = ['nRhat[0]']\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(nRhat_list[:, i])\n",
        "  plot_data.append(Rhat_list[:, i])\n",
        "  ylabels.append(('nRhat[' + str(i) + ']'))\n",
        "  ylabels.append(('Rhat[' + str(i) + ']'))\n",
        "\n",
        "  plot_data_nRhat.append(nRhat_list[:, i])\n",
        "  ylabels_nRhat.append(('nRhat[' + str(i) + ']'))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "3rM3kwpFB5P1"
      },
      "outputs": [],
      "source": [
        "fig = figure(figsize =(6, 0.5 * index_param.shape[0]))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = 1.05, color = 'r', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1.05\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "ax.boxplot(plot_data, vert = 0)\n",
        "ax.set_yticklabels(ylabels)\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "zaqn0IDCCfXh"
      },
      "outputs": [],
      "source": [
        "# Let's take a closer look at the nRhat's and compare them\n",
        "# to what we would expect from independent samples.\n",
        "fig = figure(figsize =(6, 0.5 * index_param.shape[0]))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "axvline(x = 1.05, color = 'r', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1.05\")\n",
        "ax.boxplot(plot_data_nRhat, vert = 0)\n",
        "ax.set_yticklabels(ylabels_nRhat)\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "D5LAfXkYav3E"
      },
      "source": [
        "### Adaptive warmup length"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "9NUb_sMEUOCT"
      },
      "outputs": [],
      "source": [
        "warmup_window = 100\n",
        "max_warmup = 1000\n",
        "\n",
        "kernel_cold = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                               init_step_size = init_step_size,\n",
        "                               num_warmup = max_warmup)\n",
        "\n",
        "kernel_warm = construct_kernel(target_log_prob_fn = target_log_prob_fn,\n",
        "                               init_step_size = init_step_size,\n",
        "                               num_warmup = 0)\n",
        "\n",
        "num_samples = 5"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-Y8Hsn-Qy36r"
      },
      "outputs": [],
      "source": [
        "mc_mean_list, warmup_length = run_forge_chain(num_seed = num_seed,\n",
        "                                          kernel_cold = kernel_cold,\n",
        "                                          kernel_warm = kernel_warm,\n",
        "                                          initialize = initialize,\n",
        "                                          num_super_chains = num_super_chains,\n",
        "                                          num_warmup = warmup_window,\n",
        "                                          num_samples = num_samples,\n",
        "                                          target_rhat = nRhat_upper,\n",
        "                                          max_num_steps = max_warmup // warmup_window,\n",
        "                                          index_param = index_param)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "pd0UDlgT1l_C"
      },
      "outputs": [],
      "source": [
        "square_error = np.square(mc_mean_list - mean_est[index_param])\n",
        "expected_error = var_est[index_param] / num_chains\n",
        "\n",
        "plot_data = [square_error[:, 0] / expected_error[0]]\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(square_error[:, i] / expected_error[i])\n",
        "\n",
        "fig = figure(figsize =(6, 3))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "ax.boxplot(plot_data)\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "title(\"Squared Error over expected squared error (if chains converged)\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ZXZr-5fm11eq"
      },
      "outputs": [],
      "source": [
        "scatter(warmup_length, square_error[:, 0] / expected_error[0])\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "X1M5qt5fh1H6"
      },
      "source": [
        "## Pharmacokinetic model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "MWdgeMoCx9HX"
      },
      "source": [
        "### Simulate data"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "dBa3LwSYaa7S"
      },
      "outputs": [],
      "source": [
        "time_after_dose = np.array([0.083, 0.167, 0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 6, 8])\n",
        "\n",
        "t = np.append(\n",
        "    np.append(np.append(np.append(0., time_after_dose),\n",
        "                          np.append(12., time_after_dose + 12)),\n",
        "               np.linspace(start = 24, stop = 156, num = 12)),\n",
        "               np.append(jnp.append(168., 168. + time_after_dose),\n",
        "               np.array([180, 192])))\n",
        "\n",
        "start_event = np.array([], dtype = int)\n",
        "dosing_time = range(0, 192, 12)\n",
        "\n",
        "# Use dosing events to determine times of integration between\n",
        "# exterior interventions on the system.\n",
        "eps = 1e-4  # hack to deal with some t being slightly offset.\n",
        "for t_dose in dosing_time:\n",
        "  start_event = np.append(start_event, np.where(abs(t - t_dose) \u003c= eps))\n",
        "\n",
        "amt = jnp.array([1000., 0.])\n",
        "n_dose = start_event.shape[0]\n",
        "\n",
        "start_event = np.append(start_event, t.shape[0] - 1)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "I6ssj0Khw2sQ"
      },
      "outputs": [],
      "source": [
        "n_patients = 100\n",
        "pop_location = jnp.log(jnp.array([1.5, 0.25]))\n",
        "pop_scale = jnp.array([0.15, 0.35])\n",
        "theta_patient = jnp.exp(pop_scale * random.normal(random.PRNGKey(37272709), \n",
        "                          (n_patients, ) + (2,)) + pop_location)\n",
        "\n",
        "amt = np.array([1000., 0.])\n",
        "amt_patient = np.append(np.repeat(amt[0], n_patients),\n",
        "                        np.repeat(amt[1], n_patients))\n",
        "amt_patient = amt_patient.reshape(2, n_patients)\n",
        "\n",
        "# redefine variables from previous section (in case we only run population model)\n",
        "t_jax = jnp.array(t)\n",
        "amt_vec = np.repeat(0., t.shape[0])\n",
        "amt_vec[start_event] = 1000\n",
        "amt_vec[amt_vec.shape[0] - 1] = 0.\n",
        "amt_vec_jax = jnp.array(amt_vec)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "fKudcnVCw7oY"
      },
      "outputs": [],
      "source": [
        "# TODO: remove 'use_second_axis' hack.\n",
        "def ode_map (theta, dt, current_state, use_second_axis = False):\n",
        "  if (use_second_axis):\n",
        "    k1 = theta[0, :]\n",
        "    k2 = theta[1, :]\n",
        "  else: \n",
        "    k1 = theta[:, 0]\n",
        "    k2 = theta[:, 1]\n",
        "\n",
        "  y0_hat = jnp.exp(- k1 * dt) * current_state[0, :]\n",
        "  y1_hat = jnp.exp(- k2 * dt) / (k1 - k2) * (current_state[0, :] * k1 *\\\n",
        "                (1 - jnp.exp((k2 - k1) * dt)) + (k1 - k2) * current_state[1, :])\n",
        "  return jnp.array([y0_hat, y1_hat])\n",
        "\n",
        "# @jax.jit  # Cannot jit directly: the Python `if` branches on a non-traced argument (would require static_argnums).\n",
        "def ode_map_event(theta, use_second_axis = False):\n",
        "  def ode_map_step (current_state, event_index):\n",
        "    dt = t_jax[event_index] - t_jax[event_index - 1]\n",
        "    y_sln = ode_map(theta, dt, current_state, use_second_axis)\n",
        "    dose = jnp.repeat(amt_vec_jax[event_index], n_patients)\n",
        "    y_after_dose = y_sln + jnp.append(jnp.repeat(amt_vec_jax[event_index], n_patients),\n",
        "                                      jnp.repeat(0., n_patients)).reshape(2, n_patients)\n",
        "    return (y_after_dose, y_sln[1, ])\n",
        "\n",
        "  (__, yhat) = jax.lax.scan(ode_map_step, amt_patient, \n",
        "                            np.array(range(1, t.shape[0])),\n",
        "                            unroll = 20)\n",
        "  return yhat"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "YS5OkyWSxBoP"
      },
      "outputs": [],
      "source": [
        "# Simulate some data\n",
        "y_hat = ode_map_event(theta_patient)\n",
        "\n",
        "sigma = 0.1\n",
        "# NOTE: no observation at time t = 0.\n",
        "log_y = sigma * random.normal(random.PRNGKey(1954), y_hat.shape) \\\n",
        "  + jnp.log(y_hat)\n",
        "y_obs = jnp.exp(log_y)\n",
        "\n",
        "figure(figsize = [6, 6])\n",
        "plot(t[1:], y_hat)\n",
        "plot(t[1:], y_obs, 'o', markersize = 2)\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "l5A6AmGayAq3"
      },
      "source": [
        "### Fit model with TFP"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ncqNCrOM7Dw8"
      },
      "source": [
        "As a golden benchmark, we use 1,000 samples, after warming up the chain for 1,000 iterations. With 128 chains, this gives us a total of 128,000 approximate samples."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "06BzyJA2xIS7"
      },
      "outputs": [],
      "source": [
        "pop_model = tfd.JointDistributionSequentialAutoBatched([\n",
        "    tfd.Normal(loc = jnp.log(1.), scale = 0.1, name = \"log_k1_pop\"),\n",
        "    tfd.Normal(loc = jnp.log(0.3), scale = 0.1, name = \"log_k2_pop\"),\n",
        "    tfd.Normal(loc = jnp.log(0.15), scale = 0.1, name = \"log_scale_k1\"),\n",
        "    tfd.Normal(loc = jnp.log(0.35), scale = 0.1, name = \"log_scale_k2\"),\n",
        "    tfd.Normal(loc = -1., scale = 1., name = \"log_sigma\"),\n",
        "\n",
        "    # non-centered parameterization for hierarchy\n",
        "    tfd.Independent(tfd.Normal(loc = jnp.zeros(n_patients),\n",
        "                               scale = jnp.ones(n_patients),\n",
        "                               name = \"eta_k1\"),\n",
        "                    reinterpreted_batch_ndims = 1),\n",
        "    \n",
        "    tfd.Independent(tfd.Normal(loc = jnp.zeros(n_patients),\n",
        "                               scale = jnp.ones(n_patients),\n",
        "                               name = \"eta_k2\"),\n",
        "                    reinterpreted_batch_ndims = 1),\n",
        "\n",
        "    lambda eta_k2, eta_k1, log_sigma, log_scale_k2, log_scale_k1,\n",
        "           log_k2_pop, log_k1_pop: (\n",
        "      tfd.Independent(tfd.LogNormal(\n",
        "          loc = jnp.log(\n",
        "              ode_map_event(theta = jnp.array([\n",
        "                  jnp.exp(log_k1_pop[..., jnp.newaxis] + eta_k1 * jnp.exp(log_scale_k1[..., jnp.newaxis])),\n",
        "                  jnp.exp(log_k2_pop[..., jnp.newaxis] + eta_k2 * jnp.exp(log_scale_k2[..., jnp.newaxis]))]),\n",
        "                  use_second_axis = True)),\n",
        "          scale = jnp.exp(log_sigma[..., jnp.newaxis]), name = \"y_obs\")))\n",
        "])\n",
        "\n",
        "def pop_target_log_prob_fn(log_k1_pop, log_k2_pop, log_scale_k1, log_scale_k2,\n",
        "                           log_sigma, eta_k1, eta_k2):\n",
        "  return pop_model.log_prob((log_k1_pop, log_k2_pop, log_scale_k1, log_scale_k2,\n",
        "                            log_sigma, eta_k1, eta_k2, y_obs))\n",
        "  # CHECK -- are the parentheses around the argument tuple needed here?\n",
        "\n",
        "def pop_target_log_prob_fn_flat(x):\n",
        "  log_k1_pop = x[:, 0]\n",
        "  log_k2_pop = x[:, 1]\n",
        "  log_scale_k1 = x[:, 2]\n",
        "  log_scale_k2 = x[:, 3]\n",
        "  log_sigma = x[:, 4]\n",
        "  eta_k1 = x[:, 5:(5 + n_patients)]\n",
        "  eta_k2 = x[:, (5 + n_patients):(5 + 2 * n_patients)]\n",
        "\n",
        "  return pop_model.log_prob((log_k1_pop, log_k2_pop, log_scale_k1, log_scale_k2,\n",
        "                           log_sigma, eta_k1, eta_k2, y_obs))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "zzA3YXOtxTi7"
      },
      "outputs": [],
      "source": [
        "def initialize (shape, key = random.PRNGKey(37272709)):\n",
        "  return pop_model.sample(sample_shape = shape, # (num_super_chains, 1),\\\n",
        "                          seed = key)[:7]\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "mXsRIkhBWNEj"
      },
      "outputs": [],
      "source": [
        "num_dimensions = 5 + 2 * n_patients\n",
        "def initialize_flat (shape, key = random.PRNGKey(37272709)):\n",
        "  initial = initialize(shape, key)\n",
        "  # initial_flat = np.zeros((shape, num_dimensions))\n",
        "  initial_flat = np.zeros(shape + (num_dimensions,))\n",
        "  for i in range(0, 5):\n",
        "    initial_flat[:, i] = initial[i]\n",
        "  initial_flat[:, 5:(5 + n_patients)] = initial[5]\n",
        "  initial_flat[:, (5 + n_patients):(5 + 2 * n_patients)] = initial[6]\n",
        "\n",
        "  return initial_flat\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "uSILAbDtzDfk"
      },
      "outputs": [],
      "source": [
        "num_seed = 10\n",
        "num_samples = 5\n",
        "num_warmup, num_sampling = 1000, 1000\n",
        "total_samples = num_warmup + num_sampling\n",
        "init_step_size = 0.001\n",
        "\n",
        "kernel = construct_kernel(target_log_prob_fn = pop_target_log_prob_fn,\n",
        "                          init_step_size = init_step_size,\n",
        "                          num_warmup = num_warmup)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "7w4Pw0z2z-ye"
      },
      "outputs": [],
      "source": [
        "def trace_fn(current_state, pkr):\n",
        "  return (\n",
        "    # proxy for divergent transitions\n",
        "    get_innermost(pkr, 'log_accept_ratio') \u003c -1000,\n",
        "    get_innermost(pkr, 'step_size'),\n",
        "    get_innermost(pkr, 'max_trajectory_length')\n",
        "  )\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "62cQ992n0Mz_"
      },
      "outputs": [],
      "source": [
        "initial_state = initialize((num_super_chains,))\n",
        "for i in range(0, len(initial_state)):\n",
        "  initial_state[i] = np.repeat(initial_state[i],\n",
        "                               num_chains // num_super_chains, axis = 0)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "OFZv43qoz0BW"
      },
      "outputs": [],
      "source": [
        "mcmc_states, diverged = tfp.mcmc.sample_chain(\n",
        "    num_results = total_samples,\n",
        "    current_state = initial_state,\n",
        "    kernel = kernel,\n",
        "    trace_fn = trace_fn,\n",
        "    seed = random.PRNGKey(1954))\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EGjpRC9s8ylW"
      },
      "source": [
        "### Check the inference"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "tei8YVhKyNCv"
      },
      "source": [
        "Check that the inference is reliable. If it is, use it to construct a \"golden benchmark\"."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "rcogoME9182I"
      },
      "outputs": [],
      "source": [
        "print(\"Divergent transitions after warmup:\",\n",
        "      np.sum(diverged[0][num_warmup:(num_warmup + num_samples)]))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "A6Rq8FcgzDOm"
      },
      "outputs": [],
      "source": [
        "# Extract samples after warmup from the list\n",
        "mcmc_states_sample = mcmc_states\n",
        "for i in range(0, len(mcmc_states)):\n",
        "  mcmc_states_sample[i] = mcmc_states[i][num_warmup:]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "66FFguycyfqR"
      },
      "outputs": [],
      "source": [
        "# NOTE: the last parameter is an 'x': not sure where this comes from...\n",
        "parameter_names = pop_model._flat_resolve_names()[:-1]\n",
        "\n",
        "az_states = az.from_dict(\n",
        "    #prior = {k: v[tf.newaxis, ...] for k, v in zip(parameter_names, prior_samples)},\n",
        "    posterior={\n",
        "        k: np.swapaxes(v, 0, 1) for k, v in zip(parameter_names, mcmc_states)\n",
        "    },\n",
        ")\n",
        "\n",
        "fit_summary = az.summary(az_states).filter(items=[\"mean\", \"sd\", \"mcse_sd\", \"hdi_3%\", \n",
        "                                       \"hdi_97%\", \"ess_bulk\", \"ess_tail\", \n",
        "                                       \"r_hat\"])"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ftGYkOqe3jNf"
      },
      "outputs": [],
      "source": [
        "fit_summary"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "pe618aqQzib-"
      },
      "outputs": [],
      "source": [
        "num_dimensions = fit_summary.shape[0]\n",
        "mean_est = np.zeros(num_dimensions)\n",
        "var_est = np.zeros(num_dimensions)\n",
        "\n",
        "for i in range(0, num_dimensions):\n",
        "  mean_est[i] = fit_summary.iat[i, 0]\n",
        "  var_est[i] = np.square(fit_summary.iat[i, 1])"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "gGyLkEKA87x4"
      },
      "source": [
        "### $n \\hat R$ diagnostic"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "X6rLMMak8_YI"
      },
      "outputs": [],
      "source": [
        "num_seed = 10\n",
        "num_samples = 5\n",
        "num_warmup, num_sampling = 1000, 10\n",
        "total_samples = num_warmup + num_sampling\n",
        "init_step_size = 0.001\n",
        "\n",
        "kernel = construct_kernel(target_log_prob_fn = pop_target_log_prob_fn_flat,\n",
        "                          init_step_size = init_step_size,\n",
        "                          num_warmup = num_warmup)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "zoPFOJN-9Vvl"
      },
      "outputs": [],
      "source": [
        "index_param = np.arange(0, 205)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VOSsZVQx9P0-"
      },
      "outputs": [],
      "source": [
        "Rhat_list, nRhat_list, B_list, W_list, mc_mean_list = run_fits(\n",
        "           num_seed = num_seed, total_samples = total_samples,\n",
        "           initialize = initialize_flat, kernel = kernel,\n",
        "           num_super_chains = num_super_chains, index_param = index_param,\n",
        "           num_samples = num_samples, num_warmup = num_warmup,\n",
        "           state_is_list = False)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "GRWRypOQ6WSQ"
      },
      "outputs": [],
      "source": [
        "# OPTIONAL: correct Rhat to avoid having negative values.\n",
        "if True:\n",
        "  Rhat_list = np.sqrt(np.square(Rhat_list) - (num_samples - 1) / num_samples + 1)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "nYTzkYi26XEN"
      },
      "outputs": [],
      "source": [
        "square_error = np.square(mc_mean_list - mean_est[index_param])\n",
        "expected_error = var_est[index_param] / num_chains\n",
        "\n",
        "plot_data = [square_error[:, 0] / expected_error[0]]\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(square_error[:, i] / expected_error[i])\n",
        "\n",
        "fig = figure(figsize =(6, 3))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "ax.boxplot(plot_data)\n",
        "axhline(y = 1, linestyle = \"--\")\n",
        "title(\"Squared Error over expected squared error (if chains converged)\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "UMJ2gPUD6iWh"
      },
      "outputs": [],
      "source": [
        "plot_data = [nRhat_list[:, 0], Rhat_list[:, 0]]\n",
        "ylabels = ['nRhat[0]', 'Rhat[0]']\n",
        "plot_data_nRhat = [nRhat_list[:, 0]]\n",
        "ylabels_nRhat = ['nRhat[0]']\n",
        "for i in range(1, index_param.shape[0]):\n",
        "  plot_data.append(nRhat_list[:, i])\n",
        "  plot_data.append(Rhat_list[:, i])\n",
        "  ylabels.append(('nRhat[' + str(i) + ']'))\n",
        "  ylabels.append(('Rhat[' + str(i) + ']'))\n",
        "\n",
        "  plot_data_nRhat.append(nRhat_list[:, i])\n",
        "  ylabels_nRhat.append(('nRhat[' + str(i) + ']'))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "YIf1zjpy6me4"
      },
      "outputs": [],
      "source": [
        "fig = figure(figsize =(6, 0.5 * index_param.shape[0]))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = 1.05, color = 'r', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1.05\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "ax.boxplot(plot_data, vert = 0)\n",
        "ax.set_yticklabels(ylabels)\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "oaUCozlN6yCN"
      },
      "outputs": [],
      "source": [
        "# Let's take a closer look at the nRhat's and compare them\n",
        "# to what we would expect from independent samples.\n",
        "fig = figure(figsize =(6, 0.5 * index_param.shape[0]))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "axvline(x = 1, color = 'y', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "axvline(x = nRhat_upper, color = 'c', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = u\")\n",
        "axvline(x = 1.05, color = 'r', linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1.05\")\n",
        "ax.boxplot(plot_data_nRhat, vert = 0)\n",
        "ax.set_yticklabels(ylabels_nRhat)\n",
        "title('Warmup = ' + str(num_warmup) + ', samples = ' + str(num_samples))\n",
        "legend(loc = \"best\")\n",
        "show()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wyMMXDn35S-6"
      },
      "source": [
        "### Adaptive warmup"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "e6xPho-D5WBl"
      },
      "outputs": [],
      "source": [
        "warmup_window = 100\n",
        "max_warmup = 2000\n",
        "\n",
        "kernel_cold = construct_kernel(target_log_prob_fn = pop_target_log_prob_fn_flat,\n",
        "                          init_step_size = init_step_size,\n",
        "                          num_warmup = max_warmup)\n",
        "\n",
        "kernel_warm = construct_kernel(target_log_prob_fn = pop_target_log_prob_fn_flat,\n",
        "                          init_step_size = init_step_size,\n",
        "                          num_warmup = max_warmup)\n",
        "\n",
        "num_samples = 5"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "5thKuXuV51ll"
      },
      "outputs": [],
      "source": [
        "mc_mean_list, warmup_length = run_forge_chain(num_seed = num_seed,\n",
        "                                          kernel_cold = kernel_cold,\n",
        "                                          kernel_warm = kernel_warm,\n",
        "                                          initialize = initialize_flat,\n",
        "                                          num_super_chains = num_super_chains,\n",
        "                                          num_warmup = warmup_window,\n",
        "                                          num_samples = num_samples,\n",
        "                                          target_rhat = nRhat_upper,\n",
        "                                          max_num_steps = max_warmup // warmup_window,\n",
        "                                          index_param = index_param)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "de7RXZNHfIaJ"
      },
      "source": [
        "# Draft code"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "P_Z9DLh92QWy"
      },
      "outputs": [],
      "source": [
        "nRhat, _B, _W = nested_rhat(mcmc_states,\n",
        "                    num_super_chains, index_param, num_samples,\n",
        "                    warmup_length = num_warmup,\n",
        "                    state_is_list = True)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "IQnnbqosfKFu"
      },
      "outputs": [],
      "source": [
        "seed = jax.random.PRNGKey(1954)\n",
        "\n",
        "initial_state = initialize((num_super_chains,), key = seed)\n",
        "initial_state = np.repeat(initial_state, num_chains // num_super_chains,\n",
        "                          axis = 0)\n",
        "\n",
        "result = tfp.mcmc.sample_chain(\n",
        "      total_samples, initial_state, kernel = kernel,\n",
        "      seed = seed)\n",
        "\n",
        "np.mean(result.all_states[num_warmup:, :, :], axis = (0, 1))\n",
        "# np.var(result.all_states[num_warmup:, :, :], axis = (0, 1))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "oIST_uNrdkmH"
      },
      "outputs": [],
      "source": [
        "# Examine B and W\n",
        "B_rescale = B_list / (2 * (nRhat_upper - 1))\n",
        "fig = figure(figsize = (6, 2))\n",
        "ax = fig.add_axes([0, 0, 1, 1])\n",
        "plot_data = [B_rescale[:, 0], W_list[:, 0]]\n",
        "ax.boxplot(plot_data, vert = 1)\n",
        "ax.set_xticklabels(['B_rescale[0]', 'W[0]'])\n",
        "axhline(y = var_est[0], linestyle = '--', linewidth = 0.5,\n",
        "        label = \"x = 1\")\n",
        "show()\n",
        "\n",
        "initial_state = initialize((num_super_chains,), key = jax.random.PRNGKey(1954))\n",
        "initial_state = np.repeat(initial_state, num_chains // num_super_chains,\n",
        "                          axis = 0)\n",
        "\n",
        "result = tfp.mcmc.sample_chain(\n",
        "      total_samples, initial_state, kernel = kernel,\n",
        "      seed = jax.random.PRNGKey(1954))\n",
        "\n",
        "num_sub_chains = num_chains // num_super_chains\n",
        "state_param = result.all_states[:, :, 0].reshape(total_samples, -1, num_sub_chains, 1)\n",
        "mean_super_chain = np.mean(state_param, axis = (2, 3))\n",
        "plot(mean_super_chain)\n",
        "show()\n",
        "\n",
        "B = np.var(mean_super_chain, axis = 1)\n",
        "plot(B)\n",
        "show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PxAoz7DgazBt"
      },
      "outputs": [],
      "source": [
        "# Draft\n",
        "result, num_windows = forge_chain(kernel_cold = kernel_cold,\n",
        "                                  kernel_warm = kernel_warm,\n",
        "                                  initial_state = initial_state,\n",
        "                                  num_super_chains = num_super_chains,\n",
        "                                  num_warmup = warmup_window,\n",
        "                                  num_samples = num_samples,\n",
        "                                  target_rhat = nRhat_upper,\n",
        "                                  max_num_steps = 1000 // warmup_window,\n",
        "                                  index_param = index_param,\n",
        "                                  seed = seed)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "last_runtime": {
        "build_target": "//learning/deepmind/public/tools/ml_python:ml_notebook",
        "kind": "private"
      },
      "name": "nRhat_convergence.ipynb",
      "private_outputs": true,
      "provenance": [
        {
          "file_id": "1MOclVRm23QVizBmlKLH-FEo3Iu5ERpRS",
          "timestamp": 1632343859482
        }
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
