{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "K1qUI9v2hdgU"
      },
      "outputs": [],
      "source": [
        "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "# you may not use this file except in compliance with the License.\n",
        "# You may obtain a copy of the License at\n",
        "#\n",
        "# https://www.apache.org/licenses/LICENSE-2.0\n",
        "#\n",
        "# Unless required by applicable law or agreed to in writing, software\n",
        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "# See the License for the specific language governing permissions and\n",
        "# limitations under the License."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "oxYwkO_vlhJJ"
      },
      "outputs": [],
      "source": [
        "#@title Imports\n",
        "from typing import Any, Callable, Mapping, Optional, Text, Tuple, Union\n",
        "import functools\n",
        "import collections\n",
        "import getpass\n",
        "import jax\n",
        "import jax.numpy as jnp\n",
        "from jax import tree_util\n",
        "import numpy as np\n",
        "import scipy.linalg\n",
        "from typing import Generator, Mapping, Sequence, Text\n",
        "import tensorflow.compat.v2 as tf\n",
        "import tensorflow_datasets as tfds\n",
        "import time\n",
        "import gym\n",
        "import matplotlib.pyplot as plt\n",
        "\n",
        "import pickle"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "rzbxiHMPq22y"
      },
      "outputs": [],
      "source": [
        "# @title Install necessary packages.\n",
        "# NOTE(review): versions are unpinned; pin versions for reproducible runs.\n",
        "!pip install -U dopamine-rl\n",
        "!pip install --upgrade gym\n",
        "# The accept-rom-license extra installs the Atari ROMs.\n",
        "!pip install gym[atari,accept-rom-license]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "c9mFRimnnmsA"
      },
      "outputs": [],
      "source": [
        "from dopamine.discrete_domains import atari_lib"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "MFMQz6ySjDed"
      },
      "outputs": [],
      "source": [
        "# Check whether the TPU driver flag is already set in this runtime (displays True/False).\n",
        "'TPU_DRIVER_MODE' in globals()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "DFSxvhTGjmct"
      },
      "outputs": [],
      "source": [
        "# NOTE(review): nothing visible in this notebook reads TPU_DRIVER_MODE;\n",
        "# presumably it marks the TPU driver as configured for this Colab runtime -- confirm.\n",
        "TPU_DRIVER_MODE = 1"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xnA8KPoCOKQS"
      },
      "outputs": [],
      "source": [
        "# get the latest JAX and jaxlib\n",
        "!pip install --upgrade -q jax jaxlib\n",
        "\n",
        "# Point JAX at the Colab TPU backend.\n",
        "import jax.tools.colab_tpu\n",
        "jax.tools.colab_tpu.setup_tpu()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "gIB5OnUWn3JX"
      },
      "outputs": [],
      "source": [
        "# Haiku: the neural-network library used for the model modules below.\n",
        "!pip install -U dm-haiku"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PcBdbTLFrKd1"
      },
      "outputs": [],
      "source": [
        "import haiku as hk"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "yKn5LJM8n-1R"
      },
      "outputs": [],
      "source": [
        "# Optax: gradient processing and optimization library for JAX.\n",
        "!pip install optax"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "3TO7371hrVlM"
      },
      "outputs": [],
      "source": [
        "import optax"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Vq7dhP-yrWw4"
      },
      "outputs": [],
      "source": [
        "# @title Load model checkpoint\n",
        "\n",
        "# Public GCS path of the pretrained multi-game Decision Transformer checkpoint.\n",
        "file_path = 'gs://rl-infra-public/multi_game_dt/checkpoint_38274228.pkl'\n",
        "print('loading checkpoint from:', file_path)\n",
        "# NOTE(review): pickle.load can execute arbitrary code from the file; only\n",
        "# load checkpoints from trusted sources such as this public bucket.\n",
        "with tf.io.gfile.GFile(file_path, 'rb') as f:\n",
        "  model_params, model_state = pickle.load(f)\n",
        "\n",
        "# Total parameter count, summed over all leaves of the parameter pytree.\n",
        "model_param_count = sum(x.size for x in jax.tree_util.tree_leaves(model_params))\n",
        "print('Number of model parameters: %.2e' % model_param_count)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "PQm18_6cLb0w"
      },
      "outputs": [],
      "source": [
        "# @title Utilities\n",
        "\n",
        "def cross_entropy(logits, labels):\n",
        "  \"\"\"Applies sparse cross entropy loss between logits and target labels.\"\"\"\n",
        "  labels = jax.nn.one_hot(labels, logits.shape[-1], dtype=logits.dtype)\n",
        "  loss = -labels * jax.nn.log_softmax(logits)\n",
        "  return jnp.mean(loss)\n",
        "\n",
        "\n",
        "def accuracy(logits, labels):\n",
        "  \"\"\"Computes mean accuracy of argmax predictions against target labels.\"\"\"\n",
        "  predicted_label = jnp.argmax(logits, axis=-1)\n",
        "  acc = jnp.equal(predicted_label, labels).astype(jnp.float32)\n",
        "  return jnp.mean(acc)\n",
        "\n",
        "\n",
        "def add_position_embedding(tokens: jnp.array) -\u003e jnp.array:\n",
        "  \"\"\"Add a learned position embedding to a [B x T x D] token sequence.\"\"\"\n",
        "  assert len(tokens.shape) == 3\n",
        "  seq_length = tokens.shape[1]\n",
        "  dim_tokens = tokens.shape[2]\n",
        "  embed_init = hk.initializers.TruncatedNormal(stddev=0.02)\n",
        "  # One embedding vector per position, broadcast over the batch dimension.\n",
        "  pos_emb = hk.get_parameter('positional_embeddings', [seq_length, dim_tokens], init=embed_init)\n",
        "  tokens = tokens + pos_emb\n",
        "  return tokens\n",
        "\n",
        "\n",
        "def image_embedding(\n",
        "    image: jnp.ndarray,\n",
        "    output_dim: int,\n",
        "    is_training: bool,\n",
        "    output_conv_channels: Optional[int] = 128,\n",
        "    patch_size: Optional[Tuple[int, int]] = (14, 14),\n",
        "):\n",
        "  \"\"\"Embed [B x T x W x H x C] images to tokens [B x T x output_dim] tokens.\n",
        "\n",
        "  Args:\n",
        "    image: [B x T x W x H x C] image to embed.\n",
        "    output_dim: Output embedding dimensionality.\n",
        "    is_training: Whether we're training or not.\n",
        "    output_conv_channels: channel dimensionality of convolution layers (only\n",
        "      for convoluation networks).\n",
        "    patch_size: a tuple (patch_height, patch_width), only for patches.\n",
        "\n",
        "  Returns:\n",
        "    Image embedding of shape [B x T x output_dim] or [B x T x _ x output_dim].\n",
        "  \"\"\"\n",
        "  # NOTE(review): `is_training` and `output_conv_channels` are unused in this\n",
        "  # patch-embedding implementation; presumably kept for interface\n",
        "  # compatibility with a convolutional variant -- confirm before removing.\n",
        "  assert len(image.shape) == 5\n",
        "\n",
        "  image_dims = image.shape[-3:]  # NOTE(review): unused below.\n",
        "  batch_dims = image.shape[:2]\n",
        "\n",
        "  # Reshape to [BT x W x H x C].\n",
        "  image = jnp.reshape(image, (-1,) + image.shape[-3:])\n",
        "  # Perform any-image specific processing.\n",
        "  image = image.astype(jnp.float32) / 255.0\n",
        "\n",
        "  patch_height, patch_width = patch_size[0], patch_size[1]\n",
        "  # If patch_size is (14, 14) for example, P = 84 / 14 = 6\n",
        "  image_emb = hk.Conv2D(\n",
        "      output_channels=output_dim,\n",
        "      kernel_shape=(patch_height, patch_width),\n",
        "      stride=(patch_height, patch_width),\n",
        "      padding='VALID',\n",
        "      name='image_emb')(image)  # image_emb is now [BT x P x P x D].\n",
        "\n",
        "  # Reshape to [B x T x P*P x D].\n",
        "  image_emb = jnp.reshape(image_emb, batch_dims + (-1, image_emb.shape[-1]))\n",
        "\n",
        "  # Learned position encoding over patch positions, broadcast over B and T.\n",
        "  emb_init = hk.initializers.RandomNormal(stddev=0.02)\n",
        "  pos_enc_shape = (1, 1, image_emb.shape[2], image_emb.shape[3])\n",
        "  pos_enc = hk.get_parameter(\n",
        "      'image_pos_enc', pos_enc_shape, init=emb_init, dtype=image_emb.dtype)\n",
        "  image_emb = image_emb + pos_enc\n",
        "  return image_emb\n",
        "\n",
        "\n",
        "def sample_from_logits(\n",
        "    rng: jnp.ndarray,\n",
        "    logits: jnp.ndarray,\n",
        "    deterministic: Optional[bool] = False,\n",
        "    temperature: Optional[float] = 1e+0,\n",
        "    top_k: Optional[int] = None,\n",
        "    top_percentile: Optional[float] = None) -\u003e Tuple[jnp.ndarray, jnp.ndarray]:\n",
        "  \"\"\"Generate a categorical sample from given logits.\n",
        "\n",
        "  Args:\n",
        "    rng: JAX PRNG key; split internally before sampling.\n",
        "    logits: Unnormalized log-probabilities; sampling is over the last axis.\n",
        "    deterministic: If True, take the argmax instead of sampling.\n",
        "    temperature: Multiplies the logits before sampling. NOTE(review): because\n",
        "      it multiplies rather than divides, larger values make the distribution\n",
        "      sharper -- the inverse of the usual softmax-temperature convention.\n",
        "    top_k: If set, restrict sampling to the k highest-logit classes.\n",
        "    top_percentile: If set, mask out logits at or below this percentile\n",
        "      (0-100 scale, as used by jnp.percentile).\n",
        "\n",
        "  Returns:\n",
        "    A (sample, rng) tuple of sampled indices and the updated key.\n",
        "  \"\"\"\n",
        "  if deterministic:\n",
        "    sample = jnp.argmax(logits, axis=-1)\n",
        "  else:\n",
        "    rng, sample_rng = jax.random.split(rng)\n",
        "\n",
        "    if top_percentile is not None:\n",
        "      percentile = jnp.percentile(logits, top_percentile, axis=-1)\n",
        "      logits = jnp.where(logits \u003e percentile[..., None], logits, -jnp.inf)\n",
        "    if top_k is not None:\n",
        "      logits, top_indices = jax.lax.top_k(logits, top_k)\n",
        "    sample = jax.random.categorical(sample_rng, temperature * logits, axis=-1)\n",
        "    if top_k is not None:\n",
        "      sample_shape = sample.shape\n",
        "      # Flatten top-k indices and samples for easy indexing.\n",
        "      top_indices = jnp.reshape(top_indices, [-1, top_k])\n",
        "      sample = sample.flatten()\n",
        "      # Map positions within the top-k back to original class indices.\n",
        "      sample = top_indices[jnp.arange(len(sample)), sample]\n",
        "      # Reshape samples back to original dimensions.\n",
        "      sample = jnp.reshape(sample, sample_shape)\n",
        "  return sample, rng\n",
        "\n",
        "\n",
        "def autoregressive_generate(\n",
        "    rng: jnp.ndarray,\n",
        "    logits_fn: Callable[[jnp.ndarray, Mapping[str, jnp.ndarray]], jnp.ndarray],\n",
        "    inputs: Mapping[str, jnp.ndarray],\n",
        "    name: str,\n",
        "    sequence_length: int,\n",
        "    deterministic: Optional[bool] = False,\n",
        "    temperature: Optional[float] = 1e+0,\n",
        "    top_k: Optional[int] = None,\n",
        "    top_percentile: Optional[float] = None,\n",
        "    sample_fn: Union[Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray],\n",
        "                     None] = None\n",
        ") -\u003e Tuple[jnp.ndarray, jnp.ndarray]:\n",
        "  \"\"\"Autoregressively generate an input field given a logit function.\n",
        "\n",
        "  The field `inputs[name]` is regenerated position by position: at step t the\n",
        "  model is run on the inputs with the partially generated field, and position\n",
        "  t is filled from the model's logits at that position.\n",
        "\n",
        "  Args:\n",
        "    rng: JAX PRNG key, threaded through sampling.\n",
        "    logits_fn: Maps (rng, inputs) to per-position logits for the field.\n",
        "    inputs: Model inputs; `inputs[name]` provides the shape/dtype to generate.\n",
        "    name: Key of the field to regenerate.\n",
        "    sequence_length: Number of positions to generate.\n",
        "    deterministic: See sample_from_logits (used only when sample_fn is None).\n",
        "    temperature: See sample_from_logits (used only when sample_fn is None).\n",
        "    top_k: See sample_from_logits (used only when sample_fn is None).\n",
        "    top_percentile: See sample_from_logits (used only when sample_fn is None).\n",
        "    sample_fn: Optional custom (rng, logits) -\u003e (sample, rng) function.\n",
        "\n",
        "  Returns:\n",
        "    A (generated_field, rng) tuple.\n",
        "  \"\"\"\n",
        "  # Start from zeros; entries are filled in one position per loop step.\n",
        "  val = jnp.zeros_like(inputs[name])\n",
        "\n",
        "  if sample_fn is None:\n",
        "    sample_fn = functools.partial(\n",
        "        sample_from_logits,\n",
        "        deterministic=deterministic,\n",
        "        temperature=temperature,\n",
        "        top_k=top_k,\n",
        "        top_percentile=top_percentile)\n",
        "\n",
        "  def loop_step(t, acc_rng):\n",
        "    # Loop carry is (accumulated field, rng).\n",
        "    acc, rng = acc_rng\n",
        "    datapoint = dict(inputs)\n",
        "    datapoint[name] = acc\n",
        "    logits = logits_fn(rng, datapoint)\n",
        "    sample, rng = sample_fn(rng, logits[:, t])\n",
        "    acc = acc.at[:, t].set(sample)\n",
        "    return (acc, rng)\n",
        "\n",
        "  val, rng = jax.lax.fori_loop(0, sequence_length, loop_step, (val, rng))\n",
        "  return val, rng\n",
        "\n",
        "\n",
        "def make_return(rew: jnp.ndarray):\n",
        "  \"\"\"Maximize scoring rewards (rew=1) while not terminating (rew=2).\"\"\"\n",
        "  pos_ret = jnp.sum(rew == 1, axis=-1)\n",
        "  neg_ret = jnp.sum(rew == 3, axis=-1)\n",
        "  done = jnp.any(rew == 2, axis=-1)\n",
        "  return (pos_ret - neg_ret) * (1 - done) - done\n",
        "\n",
        "\n",
        "def encode_reward(rew: jnp.ndarray) -\u003e jnp.ndarray:\n",
        "  \"\"\"Encode reward values into values expected by the model.\"\"\"\n",
        "  # 0: no reward   1: positive reward   2: terminal reward   3: negative reward\n",
        "  rew = (rew \u003e 0) * 1 + (rew \u003c 0) * 3\n",
        "  return rew.astype(jnp.int32)\n",
        "\n",
        "\n",
        "def encode_return(ret: jnp.ndarray, ret_range: Tuple[int, int]) -\u003e jnp.ndarray:\n",
        "  \"\"\"Encode (possibly negative) return values into discrete return tokens.\n",
        "\n",
        "  Args:\n",
        "    ret: Return values.\n",
        "    ret_range: (min_return, max_return) inclusive clipping range.\n",
        "\n",
        "  Returns:\n",
        "    Non-negative int32 tokens in [0, max_return - min_return].\n",
        "  \"\"\"\n",
        "  ret = ret.astype(jnp.int32)\n",
        "  ret = jnp.clip(ret, ret_range[0], ret_range[1])\n",
        "  # Shift so the smallest representable return maps to token 0.\n",
        "  ret = ret - ret_range[0]\n",
        "  return ret\n",
        "\n",
        "\n",
        "def decode_return(ret: jnp.ndarray, ret_range: Tuple[int, int]) -\u003e jnp.ndarray:\n",
        "  \"\"\"Decode discrete return tokens back into (possibly negative) returns.\"\"\"\n",
        "  ret = ret.astype(jnp.int32)\n",
        "  # Inverse of encode_return's shift (no clipping applied here).\n",
        "  ret = ret + ret_range[0]\n",
        "  return ret"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "p1IdtiEvLfqq"
      },
      "outputs": [],
      "source": [
        "# @title Transformer definition\n",
        "\n",
        "\n",
        "class DenseBlock(hk.Module):\n",
        "  \"\"\"A 2-layer MLP which widens then narrows the input.\"\"\"\n",
        "\n",
        "  def __init__(self,\n",
        "               init_scale: float,\n",
        "               widening_factor: int = 4,\n",
        "               name: Optional[str] = None):\n",
        "    \"\"\"Initializes the block.\n",
        "\n",
        "    Args:\n",
        "      init_scale: Variance-scaling factor for the Linear weight initializers.\n",
        "      widening_factor: Hidden width as a multiple of the input width.\n",
        "      name: Optional Haiku module name.\n",
        "    \"\"\"\n",
        "    super().__init__(name=name)\n",
        "    self._init_scale = init_scale\n",
        "    self._widening_factor = widening_factor\n",
        "\n",
        "  def __call__(self, x: jnp.ndarray) -\u003e jnp.ndarray:\n",
        "    \"\"\"Applies Linear -\u003e GELU -\u003e Linear, preserving the input's last dim.\"\"\"\n",
        "    hiddens = x.shape[-1]\n",
        "    initializer = hk.initializers.VarianceScaling(self._init_scale)\n",
        "    x = hk.Linear(self._widening_factor * hiddens, w_init=initializer)(x)\n",
        "    x = jax.nn.gelu(x)\n",
        "    return hk.Linear(hiddens, w_init=initializer)(x)\n",
        "\n",
        "\n",
        "def layer_norm(x: jnp.ndarray, name: Optional[str] = None) -\u003e jnp.ndarray:\n",
        "  \"\"\"Apply a unique LayerNorm to x with default settings.\"\"\"\n",
        "  # Normalizes over the last axis with learned scale and offset parameters.\n",
        "  return hk.LayerNorm(\n",
        "      axis=-1, create_scale=True, create_offset=True, name=name)(\n",
        "          x)\n",
        "\n",
        "\n",
        "class CausalSelfAttention(hk.MultiHeadAttention):\n",
        "  \"\"\"Self attention with a causal mask applied.\"\"\"\n",
        "\n",
        "  def __call__(\n",
        "      self,\n",
        "      query: jnp.ndarray,\n",
        "      key: Optional[jnp.ndarray] = None,\n",
        "      value: Optional[jnp.ndarray] = None,\n",
        "      mask: Optional[jnp.ndarray] = None,\n",
        "      custom_causal_mask: Optional[jnp.ndarray] = None,\n",
        "      prefix_length: Optional[int] = 0,\n",
        "  ) -\u003e jnp.ndarray:\n",
        "    \"\"\"Runs attention with a (possibly custom) causal mask.\n",
        "\n",
        "    Args:\n",
        "      query: [B, T, D] queries; also used as keys/values when those are None.\n",
        "      key: Optional keys; defaults to `query`.\n",
        "      value: Optional values; defaults to `query`.\n",
        "      mask: Optional attention mask, combined multiplicatively with the\n",
        "        causal mask.\n",
        "      custom_causal_mask: Optional [T, T] causality pattern; defaults to a\n",
        "        lower-triangular (sequential) mask.\n",
        "      prefix_length: Tokens up to this index may all attend to each other.\n",
        "    \"\"\"\n",
        "    key = key if key is not None else query\n",
        "    value = value if value is not None else query\n",
        "\n",
        "    if query.ndim != 3:\n",
        "      raise ValueError('Expect queries of shape [B, T, D].')\n",
        "\n",
        "    seq_len = query.shape[1]\n",
        "    # If custom_causal_mask is None, the default causality assumption is\n",
        "    # sequential (a lower triangular causal mask).\n",
        "    causal_mask = custom_causal_mask\n",
        "    if causal_mask is None:\n",
        "      causal_mask = np.tril(np.ones((seq_len, seq_len)))\n",
        "    causal_mask = causal_mask[None, None, :, :]\n",
        "\n",
        "    # Similar to T5, tokens up to prefix_length can all attend to each other.\n",
        "    # NOTE(review): this in-place assignment requires `causal_mask` to be a\n",
        "    # mutable numpy array; a jnp array passed via custom_causal_mask would\n",
        "    # raise here -- callers in this notebook pass numpy masks. Confirm.\n",
        "    causal_mask[:, :, :, :prefix_length] = 1\n",
        "    mask = mask * causal_mask if mask is not None else causal_mask\n",
        "\n",
        "    return super().__call__(query, key, value, mask)\n",
        "\n",
        "\n",
        "class Transformer(hk.Module):\n",
        "  \"\"\"A transformer stack.\"\"\"\n",
        "\n",
        "  def __init__(self,\n",
        "               num_heads: int,\n",
        "               num_layers: int,\n",
        "               dropout_rate: float,\n",
        "               name: Optional[str] = None):\n",
        "    \"\"\"Initializes the stack.\n",
        "\n",
        "    Args:\n",
        "      num_heads: Attention heads per layer (head size is fixed at 64 below).\n",
        "      num_layers: Number of residual attention + MLP layers.\n",
        "      dropout_rate: Dropout rate applied during training.\n",
        "      name: Optional Haiku module name.\n",
        "    \"\"\"\n",
        "    super().__init__(name=name)\n",
        "    self._num_layers = num_layers\n",
        "    self._num_heads = num_heads\n",
        "    self._dropout_rate = dropout_rate\n",
        "\n",
        "  def __call__(self,\n",
        "               h: jnp.ndarray,\n",
        "               mask: Optional[jnp.ndarray],\n",
        "               is_training: bool,\n",
        "               custom_causal_mask: Optional[jnp.ndarray] = None,\n",
        "               prefix_length: Optional[int] = 0) -\u003e jnp.ndarray:\n",
        "    \"\"\"Connects the transformer.\n",
        "\n",
        "    Args:\n",
        "      h: Inputs, [B, T, D].\n",
        "      mask: Padding mask, [B, T].\n",
        "      is_training: Whether we're training or not.\n",
        "      custom_causal_mask: Customized causal mask, [T, T].\n",
        "      prefix_length: Number of prefix tokens that can all attend to each other.\n",
        "\n",
        "    Returns:\n",
        "      Array of shape [B, T, D].\n",
        "    \"\"\"\n",
        "\n",
        "    init_scale = 2. / self._num_layers\n",
        "    # Dropout is disabled at evaluation time.\n",
        "    dropout_rate = self._dropout_rate if is_training else 0.\n",
        "    if mask is not None:\n",
        "      # Make sure we're not passing any information about masked h.\n",
        "      h = h * mask[:, :, None]\n",
        "      mask = mask[:, None, None, :]\n",
        "\n",
        "    # Note: names chosen to approximately match those used in the GPT-2 code;\n",
        "    # see https://github.com/openai/gpt-2/blob/master/src/model.py.\n",
        "    for i in range(self._num_layers):\n",
        "      # Pre-LayerNorm residual block: attention then MLP.\n",
        "      h_norm = layer_norm(h, name=f'h{i}_ln_1')\n",
        "      h_attn = CausalSelfAttention(\n",
        "          num_heads=self._num_heads,\n",
        "          key_size=64,\n",
        "          w_init_scale=init_scale,\n",
        "          name=f'h{i}_attn')(\n",
        "              h_norm,\n",
        "              mask=mask,\n",
        "              custom_causal_mask=custom_causal_mask,\n",
        "              prefix_length=prefix_length)\n",
        "      # hk.next_rng_key() requires this module to be applied with an RNG key.\n",
        "      h_attn = hk.dropout(hk.next_rng_key(), dropout_rate, h_attn)\n",
        "      h = h + h_attn\n",
        "      h_norm = layer_norm(h, name=f'h{i}_ln_2')\n",
        "      h_dense = DenseBlock(init_scale, name=f'h{i}_mlp')(h_norm)\n",
        "      h_dense = hk.dropout(hk.next_rng_key(), dropout_rate, h_dense)\n",
        "      h = h + h_dense\n",
        "    h = layer_norm(h, name='ln_f')\n",
        "\n",
        "    return h"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "yno8Pic7MHsW"
      },
      "outputs": [],
      "source": [
        "# @title Model definition\n",
        "\n",
        "\n",
        "class DecisionTransformer(hk.Module):\n",
        "  \"\"\"Decision transformer module.\"\"\"\n",
        "\n",
        "  def __init__(self,\n",
        "               num_actions: int,\n",
        "               num_rewards: int,\n",
        "               return_range: Tuple[int, int],\n",
        "               d_model: int,\n",
        "               num_layers: int,\n",
        "               dropout_rate: float,\n",
        "               predict_reward: bool,\n",
        "               single_return_token: bool,\n",
        "               conv_dim: int,\n",
        "               name: Optional[Text] = None):\n",
        "    \"\"\"Initializes the decision transformer.\n",
        "\n",
        "    Args:\n",
        "      num_actions: Size of the discrete action vocabulary.\n",
        "      num_rewards: Size of the encoded-reward vocabulary.\n",
        "      return_range: (min_return, max_return) range of discretized returns.\n",
        "      d_model: Transformer embedding width; must be divisible by 64.\n",
        "      num_layers: Number of transformer layers.\n",
        "      dropout_rate: Dropout rate used during training.\n",
        "      predict_reward: Whether the model also predicts rewards.\n",
        "      single_return_token: If True, only the first return token is attended.\n",
        "      conv_dim: Channel dimensionality passed to the image embedding.\n",
        "      name: Optional Haiku module name.\n",
        "    \"\"\"\n",
        "    super().__init__(name=name)\n",
        "\n",
        "    # Expected by the transformer model.\n",
        "    if d_model % 64 != 0:\n",
        "      raise ValueError(f'Model size {d_model} must be divisible by 64')\n",
        "\n",
        "    self.num_actions = num_actions\n",
        "    self.num_rewards = num_rewards\n",
        "    self.num_returns = return_range[1] - return_range[0]\n",
        "    self.return_range = return_range\n",
        "    self.d_model = d_model\n",
        "    self.predict_reward = predict_reward\n",
        "    self.conv_dim = conv_dim\n",
        "    self.single_return_token = single_return_token\n",
        "    # Images are always embedded as multiple spatial patch tokens.\n",
        "    self.spatial_tokens = True\n",
        "\n",
        "    self.transformer = Transformer(\n",
        "        name='sequence',\n",
        "        num_heads=self.d_model // 64,  # 64-dim heads (hence the check above).\n",
        "        num_layers=num_layers,\n",
        "        dropout_rate=dropout_rate)\n",
        "\n",
        "  def _embed_inputs(\n",
        "      self, obs: jnp.array, ret: jnp.array, act: jnp.array, rew: jnp.array,\n",
        "      is_training: bool) -\u003e Tuple[jnp.array, jnp.array, jnp.array, jnp.array]:\n",
        "    \"\"\"Embeds observations, returns, actions and rewards into tokens.\n",
        "\n",
        "    Returns:\n",
        "      A tuple (obs_emb, ret_emb, act_emb, rew_emb); rew_emb is None when\n",
        "      self.predict_reward is False.\n",
        "    \"\"\"\n",
        "    # Embed only prefix_frames first observations.\n",
        "    # obs are [B x T x W x H x C].\n",
        "    obs_emb = image_embedding(\n",
        "        obs,\n",
        "        self.d_model,\n",
        "        is_training=is_training,\n",
        "        output_conv_channels=self.conv_dim)\n",
        "    # Embed returns and actions\n",
        "    embed_init = hk.initializers.TruncatedNormal(stddev=0.02)\n",
        "    # Encode returns.\n",
        "    ret = encode_return(ret, self.return_range)\n",
        "    rew = encode_reward(rew)\n",
        "    ret_emb = hk.Embed(self.num_returns, self.d_model, w_init=embed_init)\n",
        "    ret_emb = ret_emb(ret)\n",
        "    act_emb = hk.Embed(self.num_actions, self.d_model, w_init=embed_init)\n",
        "    act_emb = act_emb(act)\n",
        "    if self.predict_reward:\n",
        "      rew_emb = hk.Embed(self.num_rewards, self.d_model, w_init=embed_init)\n",
        "      rew_emb = rew_emb(rew)\n",
        "    else:\n",
        "      rew_emb = None\n",
        "    return obs_emb, ret_emb, act_emb, rew_emb\n",
        "\n",
        "  def __call__(self, inputs: Mapping[str, jnp.array],\n",
        "               is_training: bool) -\u003e Mapping[str, jnp.array]:\n",
        "    \"\"\"Process sequence.\n",
        "\n",
        "    Args:\n",
        "      inputs: Dict with 'observations' [B x T x W x H x C], 'actions' [B x T],\n",
        "        'rewards' [B x T] and 'returns-to-go' [B x T].\n",
        "      is_training: Whether we're training or not.\n",
        "\n",
        "    Returns:\n",
        "      Dict with action/return (and optionally reward) logits, pre-logits\n",
        "      embeddings, plus 'loss' and 'accuracy' metrics.\n",
        "    \"\"\"\n",
        "    num_batch = inputs['actions'].shape[0]\n",
        "    num_steps = inputs['actions'].shape[1]\n",
        "    # Embed inputs.\n",
        "    obs_emb, ret_emb, act_emb, rew_emb = self._embed_inputs(\n",
        "        inputs['observations'], inputs['returns-to-go'], inputs['actions'],\n",
        "        inputs['rewards'], is_training)\n",
        "\n",
        "    if self.spatial_tokens:\n",
        "      # obs is [B x T x W x D]\n",
        "      num_obs_tokens = obs_emb.shape[2]\n",
        "      obs_emb = jnp.reshape(obs_emb, obs_emb.shape[:2] + (-1,))\n",
        "      # obs is [B x T x W*D]\n",
        "    else:\n",
        "      num_obs_tokens = 1\n",
        "    # Collect sequence.\n",
        "    # Embeddings are [B x T x D].\n",
        "    if self.predict_reward:\n",
        "      token_emb = jnp.concatenate([obs_emb, ret_emb, act_emb, rew_emb], axis=-1)\n",
        "      tokens_per_step = num_obs_tokens + 3\n",
        "      # sequence is [obs ret act rew ... obs ret act rew]\n",
        "    else:\n",
        "      token_emb = jnp.concatenate([obs_emb, ret_emb, act_emb], axis=-1)\n",
        "      tokens_per_step = num_obs_tokens + 2\n",
        "      # sequence is [obs ret act ... obs ret act]\n",
        "    token_emb = jnp.reshape(\n",
        "        token_emb, [num_batch, tokens_per_step * num_steps, self.d_model])\n",
        "    # Create position embeddings.\n",
        "    token_emb = add_position_embedding(token_emb)\n",
        "    # Run the transformer over the inputs.\n",
        "    # Token dropout.\n",
        "    batch_size = token_emb.shape[0]\n",
        "    obs_mask = jnp.ones([batch_size, num_steps, num_obs_tokens])\n",
        "    ret_mask = jnp.ones([batch_size, num_steps, 1])\n",
        "    act_mask = jnp.ones([batch_size, num_steps, 1])\n",
        "    rew_mask = jnp.ones([batch_size, num_steps, 1])\n",
        "    if self.single_return_token:\n",
        "      # Mask out all return tokens except the first one.\n",
        "      ret_mask = ret_mask.at[:, 1:].set(0)\n",
        "    if self.predict_reward:\n",
        "      mask = [obs_mask, ret_mask, act_mask, rew_mask]\n",
        "    else:\n",
        "      mask = [obs_mask, ret_mask, act_mask]\n",
        "    mask = jnp.concatenate(mask, axis=-1)\n",
        "    mask = jnp.reshape(mask, [batch_size, tokens_per_step*num_steps])\n",
        "\n",
        "    custom_causal_mask = None\n",
        "    if self.spatial_tokens:\n",
        "      # Temporal transformer by default assumes sequential causal relation.\n",
        "      # This makes the transformer causal mask a lower triangular matrix.\n",
        "      #     P1 P2 R  a  P1 P2 ... (Ps: image patches)\n",
        "      # P1  1  0* 0  0  0  0\n",
        "      # P2  1  1  0  0  0  0\n",
        "      # R   1  1  1  0  0  0\n",
        "      # a   1  1  1  1  0  0\n",
        "      # P1  1  1  1  1  1  0*\n",
        "      # P2  1  1  1  1  1  1\n",
        "      # ... (0*s should be replaced with 1s in the ideal case)\n",
        "      # But, when we have multiple tokens for an image (e.g. patch tokens, conv\n",
        "      # feature map tokens, etc) as inputs to transformer, this assumption does\n",
        "      # not hold, because there is no sequential dependencies between tokens.\n",
        "      # Therefore, the ideal causal mask should not mask out tokens that belong\n",
        "      # to the same images from each others.\n",
        "\n",
        "      seq_len = token_emb.shape[1]\n",
        "      sequential_causal_mask = np.tril(np.ones((seq_len, seq_len)))\n",
        "      num_timesteps = seq_len // tokens_per_step\n",
        "      num_non_obs_tokens = tokens_per_step - num_obs_tokens\n",
        "      # Alternating blocks: obs-token blocks (all-attend) and non-obs blocks.\n",
        "      diag = [\n",
        "          np.ones((num_obs_tokens, num_obs_tokens)) if i % 2 == 0 else np.zeros(\n",
        "              (num_non_obs_tokens, num_non_obs_tokens))\n",
        "          for i in range(num_timesteps * 2)\n",
        "      ]\n",
        "      block_diag = scipy.linalg.block_diag(*diag)\n",
        "      custom_causal_mask = np.logical_or(sequential_causal_mask, block_diag)\n",
        "      custom_causal_mask = custom_causal_mask.astype(np.float64)\n",
        "\n",
        "    output_emb = self.transformer(token_emb, mask, is_training,\n",
        "                                  custom_causal_mask)\n",
        "    # Output_embeddings are [B x tokens_per_step*T x D].\n",
        "    # Next token predictions (tokens one before their actual place).\n",
        "    ret_pred = output_emb[:, (num_obs_tokens-1)::tokens_per_step, :]\n",
        "    act_pred = output_emb[:, (num_obs_tokens-0)::tokens_per_step, :]\n",
        "    embeds = jnp.concatenate([ret_pred, act_pred], -1)\n",
        "    # Project to appropriate dimensionality.\n",
        "    ret_pred = hk.Linear(self.num_returns, name='ret_linear')(ret_pred)\n",
        "    act_pred = hk.Linear(self.num_actions, name='act_linear')(act_pred)\n",
        "    # Return logits as well as pre-logits embedding.\n",
        "    result_dict = {\n",
        "        'embeds': embeds,\n",
        "        'action_logits': act_pred,\n",
        "        'return_logits': ret_pred,\n",
        "    }\n",
        "    if self.predict_reward:\n",
        "      rew_pred = output_emb[:, (num_obs_tokens+1)::tokens_per_step, :]\n",
        "      rew_pred = hk.Linear(self.num_rewards, name='rew_linear')(rew_pred)\n",
        "      result_dict['reward_logits'] = rew_pred\n",
        "    # Return evaluation metrics.\n",
        "    result_dict['loss'] = self.sequence_loss(inputs, result_dict)\n",
        "    result_dict['accuracy'] = self.sequence_accuracy(inputs, result_dict)\n",
        "    return result_dict\n",
        "\n",
        "  def _objective_pairs(self, inputs: Mapping[str, jnp.ndarray],\n",
        "                       model_outputs: Mapping[str, jnp.ndarray]) -\u003e Sequence[Tuple[jnp.ndarray, jnp.ndarray]]:\n",
        "    \"\"\"Get (logits, target) pairs for the model objective terms.\n",
        "\n",
        "    Returns a list of pairs: (action, return) and, if self.predict_reward,\n",
        "    reward as well.\n",
        "    \"\"\"\n",
        "    act_target = inputs['actions']\n",
        "    ret_target = encode_return(inputs['returns-to-go'], self.return_range)\n",
        "    act_logits = model_outputs['action_logits']\n",
        "    ret_logits = model_outputs['return_logits']\n",
        "    if self.single_return_token:\n",
        "      # Only the first return token carries signal in this configuration.\n",
        "      ret_target = ret_target[:, :1]\n",
        "      ret_logits = ret_logits[:, :1, :]\n",
        "    obj_pairs = [(act_logits, act_target), (ret_logits, ret_target)]\n",
        "    if self.predict_reward:\n",
        "      rew_target = encode_reward(inputs['rewards'])\n",
        "      rew_logits = model_outputs['reward_logits']\n",
        "      obj_pairs.append((rew_logits, rew_target))\n",
        "    return obj_pairs\n",
        "\n",
        "  def sequence_loss(self, inputs: Mapping[str, jnp.ndarray],\n",
        "                    model_outputs: Mapping[str, jnp.ndarray]) -\u003e jnp.ndarray:\n",
        "    \"\"\"Compute the loss on data wrt model outputs.\"\"\"\n",
        "    pairs = self._objective_pairs(inputs, model_outputs)\n",
        "    # Equal-weight average of the per-objective cross-entropy terms.\n",
        "    losses = [cross_entropy(logits, target) for logits, target in pairs]\n",
        "    return sum(losses) / len(losses)\n",
        "\n",
        "  def sequence_accuracy(\n",
        "      self, inputs: Mapping[str, jnp.ndarray],\n",
        "      model_outputs: Mapping[str, jnp.ndarray]) -\u003e jnp.ndarray:\n",
        "    \"\"\"Compute the accuracy on data wrt model outputs.\"\"\"\n",
        "    pairs = self._objective_pairs(inputs, model_outputs)\n",
        "    # Equal-weight average of the per-objective accuracy terms.\n",
        "    accuracies = [accuracy(logits, target) for logits, target in pairs]\n",
        "    return sum(accuracies) / len(accuracies)\n",
        "\n",
        "  @staticmethod\n",
        "  def optimal_action(rng: jnp.ndarray,\n",
        "                     inputs: Mapping[str, jnp.ndarray],\n",
        "                     logits_fn: Callable[..., Mapping[str, jnp.ndarray]],\n",
        "                     return_range: Tuple[int, int],\n",
        "                     single_return_token: bool = False,\n",
        "                     opt_weight: Optional[float] = 0.0,\n",
        "                     num_samples: Optional[int] = 128,\n",
        "                     action_temperature: Optional[float] = 1.0,\n",
        "                     return_temperature: Optional[float] = 1.0,\n",
        "                     action_top_percentile: Optional[float] = None,\n",
        "                     return_top_percentile: Optional[float] = None):\n",
        "    \"\"\"Calculate optimal action for the given sequence model.\n",
        "\n",
        "    First samples (optimistically biased) returns-to-go from the model's\n",
        "    return logits, writes them into the input sequence, then samples an\n",
        "    action conditioned on those returns.\n",
        "\n",
        "    Args:\n",
        "      rng: JAX PRNG key; threaded through sampling and returned updated.\n",
        "      inputs: dict with 'observations' (rank-5), 'actions' and 'rewards'\n",
        "        (both rank-2, [batch, sequence]).\n",
        "      logits_fn: fn(rng, inputs) returning a dict containing at least\n",
        "        'return_logits' and 'action_logits'.\n",
        "      return_range: (min, max) of the discretized return values.\n",
        "      single_return_token: if True, only the first return token is sampled.\n",
        "      opt_weight: weight of the optimality bias added to return logits.\n",
        "      num_samples: number of return samples drawn before taking the max.\n",
        "      action_temperature: softmax temperature for action sampling.\n",
        "      return_temperature: softmax temperature for return sampling.\n",
        "      action_top_percentile: optional top-percentile cutoff for actions.\n",
        "      return_top_percentile: optional top-percentile cutoff for returns.\n",
        "\n",
        "    Returns:\n",
        "      Tuple of (sampled action, updated rng).\n",
        "    \"\"\"\n",
        "    obs, act, rew = inputs['observations'], inputs['actions'], inputs['rewards']\n",
        "    assert len(obs.shape) == 5\n",
        "    assert len(act.shape) == 2\n",
        "    # Rebuild the input dict with zeroed returns-to-go; they are (re)sampled\n",
        "    # from the model below before the action is sampled.\n",
        "    inputs = {\n",
        "        'observations': obs,\n",
        "        'actions': act,\n",
        "        'rewards': rew,\n",
        "        'returns-to-go': jnp.zeros_like(act)\n",
        "    }\n",
        "    sequence_length = obs.shape[1]\n",
        "    # Use samples from the last timestep.\n",
        "    timestep = -1\n",
        "    # A biased sampling function that prefers sampling larger returns.\n",
        "    def ret_sample_fn(rng, logits):\n",
        "      assert len(logits.shape) == 2\n",
        "      # Add optimality bias.\n",
        "      if opt_weight \u003e 0.0:\n",
        "        # Calculate log of P(optimality=1|return) := exp(return) / Z.\n",
        "        logits_opt = jnp.linspace(0.0, 1.0, logits.shape[1])\n",
        "        logits_opt = jnp.repeat(logits_opt[None, :], logits.shape[0], axis=0)\n",
        "        # Sample from log[P(optimality=1|return)*P(return)].\n",
        "        logits = logits + opt_weight * logits_opt\n",
        "      # Draw num_samples samples per batch element and keep the largest.\n",
        "      logits = jnp.repeat(logits[None, ...], num_samples, axis=0)\n",
        "      ret_sample, rng = sample_from_logits(\n",
        "          rng,\n",
        "          logits,\n",
        "          temperature=return_temperature,\n",
        "          top_percentile=return_top_percentile)\n",
        "      # Pick the highest return sample.\n",
        "      ret_sample = jnp.max(ret_sample, axis=0)\n",
        "      # Convert return tokens into return values.\n",
        "      ret_sample = decode_return(ret_sample, return_range)\n",
        "      return ret_sample, rng\n",
        "\n",
        "    # Set returns-to-go with an (optimistic) autoregressive sample.\n",
        "    if single_return_token:\n",
        "      # Since only first return is used by the model, only sample that (faster).\n",
        "      ret_logits = logits_fn(rng, inputs)['return_logits'][:, 0, :]\n",
        "      ret_sample, rng = ret_sample_fn(rng, ret_logits)\n",
        "      inputs['returns-to-go'] = inputs['returns-to-go'].at[:, 0].set(ret_sample)\n",
        "    else:\n",
        "      # Auto-regressively regenerate all return tokens in a sequence.\n",
        "      ret_logits_fn = lambda rng, input: logits_fn(rng, input)['return_logits']\n",
        "      ret_sample, rng = autoregressive_generate(\n",
        "          rng,\n",
        "          ret_logits_fn,\n",
        "          inputs,\n",
        "          'returns-to-go',\n",
        "          sequence_length,\n",
        "          sample_fn=ret_sample_fn)\n",
        "      inputs['returns-to-go'] = ret_sample\n",
        "\n",
        "    # Generate a sample from action logits.\n",
        "    act_logits = logits_fn(rng, inputs)['action_logits'][:, timestep, :]\n",
        "    act_sample, rng = sample_from_logits(\n",
        "        rng,\n",
        "        act_logits,\n",
        "        temperature=action_temperature,\n",
        "        top_percentile=action_top_percentile)\n",
        "    return act_sample, rng"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "BKwkbGF4MQcS"
      },
      "outputs": [],
      "source": [
        "# @title Atari environment definition\n",
        "\n",
        "GAME_NAMES = [\n",
        "    'AirRaid', 'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids', 'Atlantis',\n",
        "    'BankHeist', 'BattleZone', 'BeamRider', 'Berzerk', 'Bowling', 'Boxing',\n",
        "    'Breakout', 'Carnival', 'Centipede', 'ChopperCommand', 'CrazyClimber',\n",
        "    'DemonAttack', 'DoubleDunk', 'ElevatorAction', 'Enduro', 'FishingDerby',\n",
        "    'Freeway', 'Frostbite', 'Gopher', 'Gravitar', 'Hero', 'IceHockey',\n",
        "    'Jamesbond', 'JourneyEscape', 'Kangaroo', 'Krull', 'KungFuMaster',\n",
        "    'MontezumaRevenge', 'MsPacman', 'NameThisGame', 'Phoenix', 'Pitfall',\n",
        "    'Pong', 'Pooyan', 'PrivateEye', 'Qbert', 'Riverraid', 'RoadRunner',\n",
        "    'Robotank', 'Seaquest', 'Skiing', 'Solaris', 'SpaceInvaders', 'StarGunner',\n",
        "    'Tennis', 'TimePilot', 'Tutankham', 'UpNDown', 'Venture', 'VideoPinball',\n",
        "    'WizardOfWor', 'YarsRevenge', 'Zaxxon'\n",
        "]\n",
        "ATARI_OBSERVATION_SHAPE = (84, 84, 1)\n",
        "ATARI_NUM_ACTIONS = 18  # Maximum number of actions in the full dataset.\n",
        "# rew=0: no reward, rew=1: score a point, rew=2: end game, rew=3: lose a point\n",
        "ATARI_NUM_REWARDS = 4\n",
        "ATARI_RETURN_RANGE = [\n",
        "    -20, 100\n",
        "]  # A reasonable range of returns identified in the dataset\n",
        "\n",
        "_FULL_ACTION_SET = [\n",
        "    'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "    'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "    'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "]\n",
        "\n",
        "_LIMITED_ACTION_SET = {\n",
        "    'AirRaid': ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'Alien': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Amidar': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPFIRE', 'RIGHTFIRE',\n",
        "        'LEFTFIRE', 'DOWNFIRE'\n",
        "    ],\n",
        "    'Assault': ['NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'Asterix': [\n",
        "        'NOOP', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT', 'DOWNRIGHT',\n",
        "        'DOWNLEFT'\n",
        "    ],\n",
        "    'Asteroids': [\n",
        "        'NOOP',\n",
        "        'FIRE',\n",
        "        'UP',\n",
        "        'RIGHT',\n",
        "        'LEFT',\n",
        "        'DOWN',\n",
        "        'UPRIGHT',\n",
        "        'UPLEFT',\n",
        "        'UPFIRE',\n",
        "        'RIGHTFIRE',\n",
        "        'LEFTFIRE',\n",
        "        'DOWNFIRE',\n",
        "        'UPRIGHTFIRE',\n",
        "        'UPLEFTFIRE',\n",
        "    ],\n",
        "    'Atlantis': ['NOOP', 'FIRE', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'BankHeist': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'BattleZone': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'BeamRider': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'UPRIGHT', 'UPLEFT', 'RIGHTFIRE',\n",
        "        'LEFTFIRE'\n",
        "    ],\n",
        "    'Berzerk': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Bowling': ['NOOP', 'FIRE', 'UP', 'DOWN', 'UPFIRE', 'DOWNFIRE'],\n",
        "    'Boxing': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Breakout': ['NOOP', 'FIRE', 'RIGHT', 'LEFT'],\n",
        "    'Carnival': ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'Centipede': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'ChopperCommand': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'CrazyClimber': [\n",
        "        'NOOP', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT', 'DOWNRIGHT',\n",
        "        'DOWNLEFT'\n",
        "    ],\n",
        "    'DemonAttack': ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'DoubleDunk': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'ElevatorAction': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Enduro': [\n",
        "        'NOOP', 'FIRE', 'RIGHT', 'LEFT', 'DOWN', 'DOWNRIGHT', 'DOWNLEFT',\n",
        "        'RIGHTFIRE', 'LEFTFIRE'\n",
        "    ],\n",
        "    'FishingDerby': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Freeway': ['NOOP', 'UP', 'DOWN'],\n",
        "    'Frostbite': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Gopher': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE'\n",
        "    ],\n",
        "    'Gravitar': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Hero': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'IceHockey': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Jamesbond': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'JourneyEscape': [\n",
        "        'NOOP', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT', 'DOWNRIGHT',\n",
        "        'DOWNLEFT', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE', 'UPRIGHTFIRE',\n",
        "        'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Kangaroo': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Krull': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'KungFuMaster': [\n",
        "        'NOOP', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'DOWNRIGHT', 'DOWNLEFT',\n",
        "        'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE', 'UPRIGHTFIRE', 'UPLEFTFIRE',\n",
        "        'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'MontezumaRevenge': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'MsPacman': [\n",
        "        'NOOP', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT', 'DOWNRIGHT',\n",
        "        'DOWNLEFT'\n",
        "    ],\n",
        "    'NameThisGame': ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'Phoenix': [\n",
        "        'NOOP', 'FIRE', 'RIGHT', 'LEFT', 'DOWN', 'RIGHTFIRE', 'LEFTFIRE',\n",
        "        'DOWNFIRE'\n",
        "    ],\n",
        "    'Pitfall': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Pong': ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'Pooyan': ['NOOP', 'FIRE', 'UP', 'DOWN', 'UPFIRE', 'DOWNFIRE'],\n",
        "    'PrivateEye': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Qbert': ['NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN'],\n",
        "    'Riverraid': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'RoadRunner': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Robotank': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Seaquest': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Skiing': ['NOOP', 'RIGHT', 'LEFT'],\n",
        "    'Solaris': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'SpaceInvaders': ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'RIGHTFIRE', 'LEFTFIRE'],\n",
        "    'StarGunner': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Tennis': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'TimePilot': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPFIRE', 'RIGHTFIRE',\n",
        "        'LEFTFIRE', 'DOWNFIRE'\n",
        "    ],\n",
        "    'Tutankham': [\n",
        "        'NOOP', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE'\n",
        "    ],\n",
        "    'UpNDown': ['NOOP', 'FIRE', 'UP', 'DOWN', 'UPFIRE', 'DOWNFIRE'],\n",
        "    'Venture': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'VideoPinball': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPFIRE', 'RIGHTFIRE',\n",
        "        'LEFTFIRE'\n",
        "    ],\n",
        "    'WizardOfWor': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPFIRE', 'RIGHTFIRE',\n",
        "        'LEFTFIRE', 'DOWNFIRE'\n",
        "    ],\n",
        "    'YarsRevenge': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "    'Zaxxon': [\n",
        "        'NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT',\n",
        "        'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE',\n",
        "        'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE'\n",
        "    ],\n",
        "}\n",
        "\n",
        "# An array that converts an action from the game-specific to the full action set.\n",
        "LIMITED_ACTION_TO_FULL_ACTION = {\n",
        "    game_name: np.array(\n",
        "        [_FULL_ACTION_SET.index(i) for i in _LIMITED_ACTION_SET[game_name]])\n",
        "    for game_name in GAME_NAMES\n",
        "}\n",
        "\n",
        "# An array that converts an action from the full action set to a game-specific\n",
        "# action set (setting 0=NOOP if no game-specific action exists).\n",
        "FULL_ACTION_TO_LIMITED_ACTION = {\n",
        "    game_name: np.array([(_LIMITED_ACTION_SET[game_name].index(i)\n",
        "                          if i in _LIMITED_ACTION_SET[game_name] else 0)\n",
        "                         for i in _FULL_ACTION_SET]) for game_name in GAME_NAMES\n",
        "}\n",
        "\n",
        "\n",
        "def _process_observation(obs):\n",
        "  \"\"\"Round-trip an observation through JPEG compression.\n",
        "\n",
        "  Live-environment frames get the same JPEG encode/decode treatment to\n",
        "  better match observations in the dataset.\n",
        "  \"\"\"\n",
        "  encoded = tf.io.encode_jpeg(obs)\n",
        "  decoded = tf.io.decode_jpeg(encoded)\n",
        "  return decoded.numpy()\n",
        "\n",
        "\n",
        "class AtariEnvWrapper():\n",
        "  \"\"\"Environment wrapper exposing a unified gym-style API.\"\"\"\n",
        "\n",
        "  def __init__(self, game_name: str, full_action_set: Optional[bool] = True):\n",
        "    # Sticky actions are disabled to reduce variance in evaluation.\n",
        "    self._env = atari_lib.create_atari_environment(\n",
        "        game_name, sticky_actions=False)\n",
        "    self.game_name = game_name\n",
        "    self.full_action_set = full_action_set\n",
        "\n",
        "  @property\n",
        "  def observation_space(self) -\u003e gym.Space:\n",
        "    return self._env.observation_space\n",
        "\n",
        "  @property\n",
        "  def action_space(self) -\u003e gym.Space:\n",
        "    if not self.full_action_set:\n",
        "      return self._env.action_space\n",
        "    return gym.spaces.Discrete(len(_FULL_ACTION_SET))\n",
        "\n",
        "  def reset(self) -\u003e np.ndarray:\n",
        "    \"\"\"Reset environment and return the (JPEG round-tripped) observation.\"\"\"\n",
        "    return _process_observation(self._env.reset())\n",
        "\n",
        "  def step(self, action: int) -\u003e Tuple[np.ndarray, float, bool, Any]:\n",
        "    \"\"\"Step environment and return observation, reward, done, info.\"\"\"\n",
        "    if self.full_action_set:\n",
        "      # The underlying atari_py env expects the limited action set.\n",
        "      action = FULL_ACTION_TO_LIMITED_ACTION[self.game_name][action]\n",
        "    obs, rew, done, info = self._env.step(action)\n",
        "    return _process_observation(obs), rew, done, info"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "XaXxzGSWMTZS"
      },
      "outputs": [],
      "source": [
        "# @title Build model function\n",
        "\n",
        "def model_fn(datapoint, is_training=False):\n",
        "  \"\"\"Haiku forward fn: builds the model and applies it to one datapoint.\"\"\"\n",
        "  model = DecisionTransformer(num_actions = ATARI_NUM_ACTIONS,\n",
        "               num_rewards = ATARI_NUM_REWARDS,\n",
        "               return_range = ATARI_RETURN_RANGE,\n",
        "               d_model = 1280,\n",
        "               num_layers = 10,\n",
        "               dropout_rate = 0.1,\n",
        "               predict_reward = True,\n",
        "               single_return_token = True,\n",
        "               conv_dim=256)\n",
        "  return model(datapoint, is_training)\n",
        "\n",
        "# Rebind the name to the transformed (init, apply) pair; the raw forward\n",
        "# function above is no longer directly callable after this line.\n",
        "model_fn = hk.transform_with_state(model_fn)\n",
        "\n",
        "@jax.jit\n",
        "def optimal_action(rng, inputs):\n",
        "  \"\"\"Jitted policy: samples an optimal action from the trained model.\n",
        "\n",
        "  Relies on globals `model_params` and `model_state` being defined\n",
        "  (e.g. loaded from a checkpoint) before the first call.\n",
        "  \"\"\"\n",
        "  logits_fn = lambda rng, inputs: model_fn.apply(\n",
        "        model_params, model_state, rng, inputs, is_training=False)[0]\n",
        "\n",
        "  # Call directly with keyword arguments instead of building a\n",
        "  # functools.partial and immediately invoking it (same behavior,\n",
        "  # less indirection).\n",
        "  return DecisionTransformer.optimal_action(\n",
        "      rng=rng,\n",
        "      inputs=inputs,\n",
        "      logits_fn=logits_fn,\n",
        "      return_range=ATARI_RETURN_RANGE,\n",
        "      single_return_token=True,\n",
        "      opt_weight=0,\n",
        "      num_samples=128,\n",
        "      action_temperature=1.0,\n",
        "      return_temperature=0.75,\n",
        "      action_top_percentile=50,\n",
        "      return_top_percentile=None)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "vnYXhpe7MWkV"
      },
      "outputs": [],
      "source": [
        "# @title Test model function\n",
        "\n",
        "rng = jax.random.PRNGKey(0)\n",
        "\n",
        "batch_size = 2\n",
        "window_size = 4\n",
        "dummy_datapoint = {'observations': np.zeros((batch_size, window_size,) + ATARI_OBSERVATION_SHAPE),\n",
        "      'actions': np.zeros([batch_size, window_size], dtype=np.int32),\n",
        "      'rewards': np.zeros([batch_size, window_size], dtype=np.int32),\n",
        "      'returns-to-go': np.zeros([batch_size, window_size], dtype=np.int32)}\n",
        "\n",
        "init_params, init_state = model_fn.init(rng, dummy_datapoint)\n",
        "\n",
        "# apply() of a transform_with_state returns (output, new_state). The previous\n",
        "# code bound the new state to `rng`, silently clobbering the PRNG key.\n",
        "result, _ = model_fn.apply(init_params, init_state, rng, dummy_datapoint, is_training=False)\n",
        "print('Result contains: ', result.keys())"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "0ejsc7s3MYyh"
      },
      "outputs": [],
      "source": [
        "# @title Create environment wrappers\n",
        "\n",
        "class WrappedGymEnv:\n",
        "\n",
        "  def __getattr__(self, name):\n",
        "    \"\"\"Wrappers forward non-overridden method calls to their wrapped env.\"\"\"\n",
        "    if name.startswith('__'):\n",
        "      raise AttributeError(name)\n",
        "    return getattr(self._env, name)\n",
        "\n",
        "class SequenceEnvironmentWrapper(WrappedGymEnv):\n",
        "  \"\"\"Environment wrapper for supporting sequential model inference.\n",
        "\n",
        "  Maintains rolling histories (deques) of the last `num_stack_frames`\n",
        "  observations, actions and rewards so that every observation returned\n",
        "  to the agent is a fixed-length history window.\n",
        "  \"\"\"\n",
        "\n",
        "  def __init__(self,\n",
        "               env,\n",
        "               num_stack_frames: int = 1):\n",
        "    self._env = env\n",
        "    self.num_stack_frames = num_stack_frames\n",
        "    if self.is_goal_conditioned:\n",
        "      # If env is goal-conditioned, we want to track goal history.\n",
        "      self.goal_stack = collections.deque([], maxlen=self.num_stack_frames)\n",
        "    # Fixed-length histories; appending past maxlen drops the oldest entry.\n",
        "    self.obs_stack = collections.deque([], maxlen=self.num_stack_frames)\n",
        "    self.act_stack = collections.deque([], maxlen=self.num_stack_frames)\n",
        "    self.rew_stack = collections.deque([], maxlen=self.num_stack_frames)\n",
        "    self.done_stack = collections.deque([], maxlen=self.num_stack_frames)\n",
        "    self.info_stack = collections.deque([], maxlen=self.num_stack_frames)\n",
        "\n",
        "  @property\n",
        "  def observation_space(self):\n",
        "    \"\"\"Constructs observation space.\"\"\"\n",
        "    parent_obs_space = self._env.observation_space\n",
        "    act_space = self.action_space\n",
        "    episode_history = {\n",
        "        'observations': gym.spaces.Box(\n",
        "            np.stack([parent_obs_space.low] * self.num_stack_frames, axis=0),\n",
        "            np.stack([parent_obs_space.high] * self.num_stack_frames, axis=0),\n",
        "            dtype=parent_obs_space.dtype),\n",
        "        'actions': gym.spaces.Box(\n",
        "            0, act_space.n, [self.num_stack_frames], dtype=act_space.dtype),\n",
        "        'rewards': gym.spaces.Box(-np.inf, np.inf, [self.num_stack_frames])\n",
        "    }\n",
        "    if self.is_goal_conditioned:\n",
        "      goal_shape = np.shape(self._env.goal)  # pytype: disable=attribute-error\n",
        "      episode_history['returns-to-go'] = gym.spaces.Box(\n",
        "          -np.inf, np.inf, [self.num_stack_frames] + goal_shape)\n",
        "    return gym.spaces.Dict(**episode_history)\n",
        "\n",
        "  @property\n",
        "  def is_goal_conditioned(self):\n",
        "    # Always False in this wrapper; all goal-conditioned branches in this\n",
        "    # class are dormant unless a subclass overrides this property.\n",
        "    return False\n",
        "\n",
        "  def pad_current_episode(self, obs, n):\n",
        "    \"\"\"Prepad history stacks with n zero/absorbing steps (done=1).\"\"\"\n",
        "    # Prepad current episode with n steps.\n",
        "    for _ in range(n):\n",
        "      if self.is_goal_conditioned:\n",
        "        self.goal_stack.append(self._env.goal)  # pytype: disable=attribute-error\n",
        "      self.obs_stack.append(np.zeros_like(obs))\n",
        "      self.act_stack.append(0)\n",
        "      self.rew_stack.append(0)\n",
        "      self.done_stack.append(1)\n",
        "      self.info_stack.append(None)\n",
        "\n",
        "  def _get_observation(self):\n",
        "    \"\"\"Return current episode's N-stacked observation.\n",
        "\n",
        "    For N=3, the first observation of the episode (reset) looks like:\n",
        "\n",
        "    *= hasn't happened yet.\n",
        "\n",
        "    GOAL  OBS  ACT  REW  DONE\n",
        "    =========================\n",
        "    g0    0    0.   0.   True\n",
        "    g0    0    0.   0.   True\n",
        "    g0    x0   0.   0.   False\n",
        "\n",
        "    After the first step(a0) taken, yielding x1, r0, done0, info0, the next\n",
        "    observation looks like:\n",
        "\n",
        "    GOAL  OBS  ACT  REW  DONE\n",
        "    =========================\n",
        "    g0    0    0.   0.   True\n",
        "    g0    x0   0.   0.   False\n",
        "    g1    x1   a0   r0   d0\n",
        "\n",
        "    A more chronologically intuitive way to re-order the column data would be:\n",
        "\n",
        "    PREV_ACT  PREV_REW  PREV_DONE CURR_GOAL CURR_OBS\n",
        "    ================================================\n",
        "    0.        0.        True      g0        0\n",
        "    0.        0.        False*    g0        x0\n",
        "    a0        r0        info0     g1        x1\n",
        "\n",
        "    Returns:\n",
        "      episode_history: np.ndarray of observation.\n",
        "    \"\"\"\n",
        "    episode_history = {\n",
        "        'observations': np.stack(self.obs_stack, axis=0),\n",
        "        'actions': np.stack(self.act_stack, axis=0),\n",
        "        'rewards': np.stack(self.rew_stack, axis=0),\n",
        "    }\n",
        "    if self.is_goal_conditioned:\n",
        "      episode_history['returns-to-go'] = np.stack(self.goal_stack, axis=0)\n",
        "    return episode_history\n",
        "\n",
        "  def reset(self):\n",
        "    \"\"\"Resets env and returns new observation.\"\"\"\n",
        "    obs = self._env.reset()\n",
        "    # Create a N-1 \"done\" past frames.\n",
        "    self.pad_current_episode(obs, self.num_stack_frames-1)\n",
        "    # Create current frame (but with placeholder actions and rewards).\n",
        "    if self.is_goal_conditioned:\n",
        "      self.goal_stack.append(self._env.goal)\n",
        "    self.obs_stack.append(obs)\n",
        "    self.act_stack.append(0)\n",
        "    self.rew_stack.append(0)\n",
        "    self.done_stack.append(0)\n",
        "    self.info_stack.append(None)\n",
        "    return self._get_observation()\n",
        "\n",
        "  def step(self, action: np.ndarray):\n",
        "    \"\"\"Replaces env observation with fixed length observation history.\"\"\"\n",
        "    # Update applied action to the previous timestep.\n",
        "    self.act_stack[-1] = action\n",
        "    obs, rew, done, info = self._env.step(action)\n",
        "    self.rew_stack[-1] = rew\n",
        "    # Update frame stack.\n",
        "    self.obs_stack.append(obs)\n",
        "    self.act_stack.append(0)  # Append unknown action to current timestep.\n",
        "    self.rew_stack.append(0)\n",
        "    self.info_stack.append(info)\n",
        "    # NOTE(review): done_stack is not appended here; it is only written in\n",
        "    # reset/pad_current_episode and is never read by _get_observation.\n",
        "    if self.is_goal_conditioned:\n",
        "      self.goal_stack.append(self._env.goal)\n",
        "    if done:\n",
        "      if self.is_goal_conditioned:\n",
        "        # rewrite the observations to reflect hindsight RtG conditioning.\n",
        "        self.replace_goals_with_hindsight()\n",
        "    return self._get_observation(), rew, done, info\n",
        "\n",
        "  def replace_goals_with_hindsight(self):\n",
        "    # We perform this after rew_stack has been updated.\n",
        "    # Each slot gets the sum of rewards from that timestep onward (RtG).\n",
        "    assert self.is_goal_conditioned\n",
        "    window_return = sum(list(self.rew_stack))\n",
        "    for r in self.rew_stack:\n",
        "      self.goal_stack.append(window_return)\n",
        "      window_return -= r\n",
        "\n",
        "def build_env_fn(game_name):\n",
        "  \"\"\"Returns env constructor fn.\"\"\"\n",
        "\n",
        "  def env_fn():\n",
        "    env = AtariEnvWrapper(game_name)\n",
        "    env = SequenceEnvironmentWrapper(env, 4)\n",
        "    return env\n",
        "\n",
        "  return env_fn"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "bC8PCmaLMd1A"
      },
      "outputs": [],
      "source": [
        "# @title Environment rollout\n",
        "\n",
        "\n",
        "# You can add your own logic and any other collection code here.\n",
        "def _batch_rollout(rng, envs, policy_fn, num_steps=2500, log_interval=None):\n",
        "  \"\"\"Roll out a batch of environments under a given policy function.\n",
        "\n",
        "  Args:\n",
        "    rng: JAX PRNG key, threaded through policy_fn on every step.\n",
        "    envs: List of gym-style envs whose observations are dictionaries.\n",
        "    policy_fn: Callable (rng, batched_obs) -> (actions, rng).\n",
        "    num_steps: Maximum number of environment steps to take.\n",
        "    log_interval: If set, print progress every log_interval steps.\n",
        "\n",
        "  Returns:\n",
        "    Tuple of (per-env reward sums up to first termination, list of frames\n",
        "    for visualization, final rng).\n",
        "  \"\"\"\n",
        "  # Observations are dictionaries. Merge into a single dictionary with\n",
        "  # batched observations.\n",
        "  obs_list = [env.reset() for env in envs]\n",
        "  num_batch = len(envs)\n",
        "  obs = tree_util.tree_map(lambda *arr: np.stack(arr, axis=0), *obs_list)\n",
        "  done = np.zeros(num_batch, dtype=np.int32)\n",
        "  rew_sum = np.zeros(num_batch, dtype=np.float32)\n",
        "  frames = []\n",
        "  for t in range(num_steps):\n",
        "    # Collect the most recent frame of each env, tiled side by side.\n",
        "    frames.append(\n",
        "        np.concatenate([o['observations'][-1, ...] for o in obs_list], axis=1))\n",
        "    done_prev = done\n",
        "\n",
        "    actions, rng = policy_fn(rng, obs)\n",
        "\n",
        "    # Collect step results and stack as a batch.\n",
        "    step_results = [env.step(act) for env, act in zip(envs, actions)]\n",
        "    obs_list = [result[0] for result in step_results]\n",
        "    obs = tree_util.tree_map(lambda *arr: np.stack(arr, axis=0), *obs_list)\n",
        "    rew = np.stack([result[1] for result in step_results])\n",
        "    done = np.stack([result[2] for result in step_results])\n",
        "    # An env stays marked done once it finishes; rewards from done envs are\n",
        "    # masked so rew_sum reflects each env's first episode only.\n",
        "    done = np.logical_or(done, done_prev).astype(np.int32)\n",
        "    rew = rew * (1 - done)\n",
        "    rew_sum += rew\n",
        "    if log_interval and t % log_interval == 0:\n",
        "      print('step: %d done: %s reward: %s' % (t, done, rew_sum))\n",
        "    # Don't continue if all environments are done.\n",
        "    if np.all(done):\n",
        "      break\n",
        "  return rew_sum, frames, rng\n",
        "\n",
        "\n",
        "# Select the first game from evaluation config. Feel free to change.\n",
        "game_name = 'Breakout'  # @param\n",
        "num_envs = 16  # @param\n",
        "\n",
        "# Create a batch of identical environments to evaluate.\n",
        "env_fn = build_env_fn(game_name)\n",
        "env_batch = [env_fn() for _ in range(num_envs)]\n",
        "\n",
        "# Fixed seed for the policy's PRNG key.\n",
        "rng = jax.random.PRNGKey(0)\n",
        "# NOTE: the evaluation num_steps is shorter than what is used for paper experiments for speed.\n",
        "rew_sum, frames, rng = _batch_rollout(\n",
        "    rng, env_batch, optimal_action, num_steps=5000, log_interval=100)\n",
        "\n",
        "print('scores:', rew_sum, 'average score:', np.mean(rew_sum))"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "b38HbL7RMhuy"
      },
      "outputs": [],
      "source": [
        "# Summary statistics over the batch of rollouts above.\n",
        "print(f'total score: mean: {np.mean(rew_sum)} std: {np.std(rew_sum)} max: {np.max(rew_sum)}')"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "-DjoIz2LMlGh"
      },
      "outputs": [],
      "source": [
        "# @title Plot scores\n",
        "\n",
        "# Use the explicit figure/axes interface rather than the pyplot state\n",
        "# machine, and end with plt.show() so the cell does not also display the\n",
        "# Text repr of the last labeling call.\n",
        "fig, ax = plt.subplots()\n",
        "ax.plot(rew_sum, 'o')\n",
        "ax.set_title(f'Game scores for {game_name}')\n",
        "ax.set_xlabel('trial index')\n",
        "ax.set_ylabel('score')\n",
        "plt.show()"
      ]
    }
  ],
  "metadata": {
    "accelerator": "TPU",
    "colab": {
      "collapsed_sections": [],
      "name": "Multi_game_decision_transformers_public_colab.ipynb",
      "private_outputs": true,
      "provenance": [
        {
          "file_id": "1FBsoloxeBnLoMGBefz42OlrPIbAdE87O",
          "timestamp": 1659525848010
        }
      ]
    },
    "gpuClass": "standard",
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
