{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "RMivPb5DZwpL"
      },
      "outputs": [],
      "source": [
        "#@title Imports\n",
        "\n",
        "# Licensed under the Apache License, Version 2.0\n",
        "\n",
        "import json\n",
        "import io\n",
        "import random\n",
        "import time\n",
        "import numpy as np\n",
        "import statistics\n",
        "import tensorflow.compat.v1 as tf\n",
        "\n",
        "tf.enable_eager_execution()\n",
        "assert tf.executing_eagerly()\n",
        "\n",
        "print(tf.version.VERSION)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "P_0tI6aNlvEF"
      },
      "outputs": [],
      "source": [
        "#@title \"COGS\" tagging dataset generation\n",
        "\n",
        "# NOTE(review): placeholder path -- point this at the directory that\n",
        "# holds the sequence-tagged COGS jsonl files before running.\n",
        "DATA_DIR = 'DATA_DIR'  # Directory containing sequence tagged json files.\n",
        "# Order matters: read_cogs_datasets expects train, test, dev, gen.\n",
        "DATASET_FILES = [DATA_DIR + ifile for ifile in [\n",
        "        \"train_seqtag.jsonl\", \"test_seqtag.jsonl\", \"dev_seqtag.jsonl\",\"gen_seqtag.jsonl\"]\n",
        "]\n",
        "\n",
        "# Padding entry (index 0) of every string-valued feature vocab.\n",
        "PAD_TOKEN = \"[PAD]\"\n",
        "# Sentinel padding entry (index 0) of the integer parent vocab.\n",
        "PAD_PARENT = 99999\n",
        "\n",
        "# Possible parent encodings:\n",
        "# The type of parent encoding is used already in dataset generation.\n",
        "PARENT_ABSOLUTE = \"parent_absolute_encoding\"  # parent as absolute token index\n",
        "PARENT_RELATIVE = \"parent_relative_encoding\"  # parent as offset from the child token\n",
        "PARENT_ATTENTION = \"parent_attention_encoding\"  # parent read off an attention matrix\n",
        "\n",
        "\n",
        "def decode(seq, vocab):\n",
        "  out = \"\"\n",
        "  for tok in seq:\n",
        "    if tok == 0:\n",
        "      return out\n",
        "    out += str(vocab[tok]) + \", \"\n",
        "  return out\n",
        "\n",
        "\n",
        "def read_cogs_datafile(filename):\n",
        "  \"\"\"Read one jsonl data file: one json-encoded example per line.\n",
        "\n",
        "  Args:\n",
        "    filename: path readable by tf.io.gfile (local or remote).\n",
        "\n",
        "  Returns:\n",
        "    List of example dicts, one per line of the file.\n",
        "  \"\"\"\n",
        "  with tf.io.gfile.GFile(filename, \"r\") as f:\n",
        "    data = [json.loads(line) for line in f]\n",
        "  # Report which file was read; the old message printed \"(unknown)\".\n",
        "  print(f\"loaded {filename}: {len(data)} instances.\")\n",
        "  return data\n",
        "\n",
        "\n",
        "def split_set_by_distribution(dataset):\n",
        "  \"\"\"Create multiple splits based on the generalization type.\n",
        "\n",
        "  Only COGS geneneralization dataset is annotated by the generalization type.\n",
        "  \"\"\"\n",
        "  distributions = []\n",
        "  split = {}\n",
        "  for example in dataset:\n",
        "    distribution = example[\"distribution\"]\n",
        "    if distribution in split:\n",
        "      split[distribution].append(example)\n",
        "    else:\n",
        "      distributions.append(distribution)\n",
        "      split[distribution] = [example]\n",
        "  for distribution in split:\n",
        "    print(f\"{distribution}: {len(split[distribution])}\")\n",
        "  return split, distributions\n",
        "\n",
        "\n",
        "def create_dataset_feature_tensor(dataset, feature, vocab, max_len, parent_encoding=None):\n",
        "  \"\"\"Read the selected feature from the examples.\n",
        "\n",
        "  Each example's `feature` sequence is converted to vocab indices and\n",
        "  padded with 0 to `max_len`. Be careful about the parent encoding,\n",
        "  since we compare the indices to the attention matrix.\n",
        "\n",
        "  Args:\n",
        "    dataset: list of example dicts (as loaded by read_cogs_datafile).\n",
        "    feature: key of the per-token feature to extract.\n",
        "    vocab: list of feature values; list position is the id.\n",
        "    max_len: output sequence length after padding/truncation.\n",
        "    parent_encoding: PARENT_ABSOLUTE / PARENT_RELATIVE / PARENT_ATTENTION;\n",
        "      only used when feature == \"parent\".\n",
        "\n",
        "  Returns:\n",
        "    Integer array of shape [len(dataset), max_len].\n",
        "\n",
        "  Raises:\n",
        "    ValueError: for an unknown parent_encoding when feature == \"parent\".\n",
        "  \"\"\"\n",
        "  feature_tensor = []\n",
        "  for example in dataset:\n",
        "    if feature == \"parent\":\n",
        "      if parent_encoding == PARENT_ABSOLUTE:\n",
        "        assert vocab[0] == PAD_PARENT  # padding\n",
        "        assert vocab[1] == -1  # -1 means no parent\n",
        "        assert vocab[2] == 0   # parent is the 1st token\n",
        "        tensor = [vocab.index(x) for x in example[feature]]\n",
        "      elif parent_encoding == PARENT_RELATIVE:\n",
        "        assert vocab[0] == PAD_PARENT\n",
        "        # Use self instead of -1 to denote no parent: a token without a\n",
        "        # parent gets relative offset 0 (itself).\n",
        "        parents = example[feature]\n",
        "        tensor = [vocab.index(parents[i]-i) if parents[i] != -1 else vocab.index(0) for i in range(len(parents))]\n",
        "      elif parent_encoding == PARENT_ATTENTION:\n",
        "        # Use self instead of -1 to denote no parent.\n",
        "        # The vocab for parent is hardcoded: [PAD_PARENT, 0, 1, 2, ...]\n",
        "        assert vocab[0] == PAD_PARENT\n",
        "        assert vocab[1] == 0\n",
        "        assert vocab[2] == 1\n",
        "        parents = example[feature]\n",
        "        tensor = [vocab.index(parents[i]) if parents[i] != -1 else vocab.index(i) for i in range(len(parents))]\n",
        "      else:\n",
        "        raise ValueError(f\"Undefined parent_encoding: {parent_encoding}\")\n",
        "    else:\n",
        "      tensor = [vocab.index(x) for x in example[feature]]\n",
        "    feature_tensor.append(tensor)\n",
        "  # pad_sequences pads each row with 0 (the padding id) at the end.\n",
        "  feature_tensor = tf.keras.preprocessing.sequence.pad_sequences(\n",
        "      feature_tensor, padding=\"post\", maxlen=max_len)\n",
        "  return feature_tensor\n",
        "\n",
        "\n",
        "def create_parent_ids_tensor(dataset, max_len):\n",
        "  \"\"\"Build the input padding mask for every example.\n",
        "\n",
        "  An entry is 0 where there is an input token and 1 where the position\n",
        "  is padding.\n",
        "  \"\"\"\n",
        "  masks = []\n",
        "  for example in dataset:\n",
        "    n_tokens = min(len(example[\"tokens\"]), max_len)\n",
        "    masks.append([0] * n_tokens + [1] * (max_len - n_tokens))\n",
        "  # Rows are already max_len wide; pad_sequences just makes an int array.\n",
        "  return tf.keras.preprocessing.sequence.pad_sequences(\n",
        "      masks, padding=\"post\", maxlen=max_len)\n",
        "\n",
        "\n",
        "def create_dataset_tensors(dataset,\n",
        "                           vocabs,\n",
        "                           max_len,\n",
        "                           batch_size,\n",
        "                           show_example=False,\n",
        "                           parent_encoding=None):\n",
        "  \"\"\"Convert a list of examples into a shuffled, batched tf.data.Dataset.\n",
        "\n",
        "  vocabs order: tokens, parent, role, category, noun_type, verb_name.\n",
        "  \"\"\"\n",
        "  tokens = create_dataset_feature_tensor(dataset, \"tokens\", vocabs[0],\n",
        "                                         max_len)\n",
        "  parent_ids = create_parent_ids_tensor(dataset, max_len)\n",
        "  parents = create_dataset_feature_tensor(dataset, \"parent\", vocabs[1],\n",
        "                                          max_len, parent_encoding)\n",
        "  roles = create_dataset_feature_tensor(dataset, \"role\", vocabs[2], max_len)\n",
        "  categories = create_dataset_feature_tensor(dataset, \"category\", vocabs[3],\n",
        "                                             max_len)\n",
        "  noun_types = create_dataset_feature_tensor(dataset, \"noun_type\", vocabs[4],\n",
        "                                             max_len)\n",
        "  verbs = create_dataset_feature_tensor(dataset, \"verb_name\", vocabs[5],\n",
        "                                        max_len)\n",
        "\n",
        "  if show_example:\n",
        "    print(\"- Sample Example ----------------\")\n",
        "    print(f\"tokens: {decode(tokens[0], vocabs[0])}\")\n",
        "    print(f\"parent_ids: {parent_ids[0]}\")\n",
        "    print(f\"parent: {decode(parents[0], vocabs[1])}\")\n",
        "    print(f\"role: {decode(roles[0], vocabs[2])}\")\n",
        "    print(f\"category: {decode(categories[0], vocabs[3])}\")\n",
        "    print(f\"noun_type: {decode(noun_types[0], vocabs[4])}\")\n",
        "    print(f\"verb_name: {decode(verbs[0], vocabs[5])}\")\n",
        "    print(\"---------------------------------\")\n",
        "\n",
        "  # Shuffle over the full set, then batch (dropping the ragged remainder).\n",
        "  tf_dataset = tf.data.Dataset.from_tensor_slices(\n",
        "      (tokens, parent_ids, parents, roles,\n",
        "       categories, noun_types, verbs)).shuffle(len(dataset))\n",
        "  return tf_dataset.batch(batch_size, drop_remainder=True)\n",
        "\n",
        "\n",
        "def read_cogs_datasets(dataset_files, parent_encoding, batch_size):\n",
        "  \"\"\"Load the four COGS splits and build datasets, vocabs and stats.\n",
        "\n",
        "  Args:\n",
        "    dataset_files: [train, test, dev, gen] jsonl paths, in this order.\n",
        "    parent_encoding: PARENT_ABSOLUTE / PARENT_RELATIVE / PARENT_ATTENTION.\n",
        "    batch_size: batch size used for every returned dataset.\n",
        "\n",
        "  Returns:\n",
        "    (train_tf_dataset, test_sets, test_sets_names, vocabs, max_len), where\n",
        "    test_sets holds test, dev, the per-distribution gen splits, and gen.\n",
        "\n",
        "  Raises:\n",
        "    ValueError: for an unknown parent_encoding.\n",
        "  \"\"\"\n",
        "  assert len(dataset_files) == 4, (\n",
        "      \"expected list of dataset paths in this order: train, test, dev, gen; \"\n",
        "      \"got %s\"\n",
        "  ) % dataset_files\n",
        "  cogs_train, cogs_test, cogs_dev, cogs_gen = dataset_files\n",
        "\n",
        "  train_set = read_cogs_datafile(cogs_train)\n",
        "  test_set = read_cogs_datafile(cogs_test)\n",
        "  dev_set = read_cogs_datafile(cogs_dev)\n",
        "  gen_set = read_cogs_datafile(cogs_gen)\n",
        "\n",
        "  def _extend_vocab(vocab, values):\n",
        "    # Append unseen values, preserving first-seen order.\n",
        "    for value in values:\n",
        "      if value not in vocab:\n",
        "        vocab.append(value)\n",
        "\n",
        "  # Create vocabs, and calculate dataset stats:\n",
        "  tokens_vocab = [PAD_TOKEN]\n",
        "  # The token with index 0 has to be padding, because loss relies on it.\n",
        "  # -1 already denotes \"no parent\" in the tagging dataset, so the\n",
        "  # dedicated sentinel PAD_PARENT is used as the padding entry instead.\n",
        "  parent_vocab_raw = [PAD_PARENT]\n",
        "  role_vocab = [PAD_TOKEN]\n",
        "  category_vocab = [PAD_TOKEN]\n",
        "  noun_type_vocab = [PAD_TOKEN]\n",
        "  verb_name_vocab = [PAD_TOKEN]\n",
        "\n",
        "  max_len = 0\n",
        "  for example in train_set + test_set + dev_set + gen_set:\n",
        "    _extend_vocab(tokens_vocab, example[\"tokens\"])\n",
        "    _extend_vocab(parent_vocab_raw, example[\"parent\"])\n",
        "    _extend_vocab(role_vocab, example[\"role\"])\n",
        "    _extend_vocab(category_vocab, example[\"category\"])\n",
        "    _extend_vocab(noun_type_vocab, example[\"noun_type\"])\n",
        "    _extend_vocab(verb_name_vocab, example[\"verb_name\"])\n",
        "    max_len = max(max_len, len(example[\"tokens\"]))\n",
        "\n",
        "  # Build the parent vocab from the full range each encoding can produce,\n",
        "  # so indices absent from the data are still representable.\n",
        "  if parent_encoding == PARENT_ABSOLUTE:\n",
        "    parent_vocab = [PAD_PARENT, -1] + list(range(max_len))\n",
        "  elif parent_encoding == PARENT_RELATIVE:\n",
        "    parent_vocab = [PAD_PARENT] + list(range(-max_len+1, max_len))\n",
        "  elif parent_encoding == PARENT_ATTENTION:\n",
        "    parent_vocab = [PAD_PARENT] + list(range(max_len))\n",
        "  else:\n",
        "    raise ValueError(f\"Undefined parent_encoding: {parent_encoding}\")\n",
        "\n",
        "  max_len += 1  # guarantee at least one padding token at the end\n",
        "                # for \"no parent\"\n",
        "                # Padding token is also used to stop decoding in decode().\n",
        "\n",
        "  gen_distribution_split, gen_distributions = split_set_by_distribution(gen_set)\n",
        "\n",
        "  print(f\"n_distributions: {len(gen_distribution_split)}\")\n",
        "  # Print the actual value (this used to be a hard-coded literal).\n",
        "  print(f\"max_len: {max_len}\")\n",
        "  print(f\"tokens_vocab: {len(tokens_vocab)}  --\u003e\u003e {tokens_vocab}\")\n",
        "  print(f\"parent_vocab: {len(parent_vocab)}  --\u003e\u003e {parent_vocab}\")\n",
        "  parent_vocab_missing = sorted(set(parent_vocab) - set(parent_vocab_raw))\n",
        "  print(f\"parent indices missing from the data: {len(parent_vocab_missing)}  --\u003e\u003e {parent_vocab_missing}\")\n",
        "  print(f\"role_vocab: {len(role_vocab)}  --\u003e\u003e {role_vocab}\")\n",
        "  print(f\"category_vocab: {len(category_vocab)}  --\u003e\u003e {category_vocab}\")\n",
        "  print(f\"noun_type_vocab: {len(noun_type_vocab)}  --\u003e\u003e {noun_type_vocab}\")\n",
        "  print(f\"verb_name_vocab: {len(verb_name_vocab)}  --\u003e\u003e {verb_name_vocab}\")\n",
        "\n",
        "  vocabs = [tokens_vocab, parent_vocab,\n",
        "            role_vocab, category_vocab,\n",
        "            noun_type_vocab, verb_name_vocab]\n",
        "\n",
        "  train_tf_dataset = create_dataset_tensors(\n",
        "      train_set, vocabs, max_len, batch_size, show_example=True,\n",
        "      parent_encoding=parent_encoding)\n",
        "  test_tf_dataset = create_dataset_tensors(\n",
        "      test_set, vocabs, max_len, batch_size, parent_encoding=parent_encoding)\n",
        "  dev_tf_dataset = create_dataset_tensors(\n",
        "      dev_set, vocabs, max_len, batch_size, parent_encoding=parent_encoding)\n",
        "  gen_tf_dataset = create_dataset_tensors(\n",
        "      gen_set, vocabs, max_len, batch_size, parent_encoding=parent_encoding)\n",
        "\n",
        "  # One additional dataset per generalization distribution (reuses vocabs).\n",
        "  gen_split_tf_datasets = [\n",
        "      create_dataset_tensors(gen_distribution_split[distribution],\n",
        "                             vocabs, max_len, batch_size,\n",
        "                             parent_encoding=parent_encoding)\n",
        "      for distribution in gen_distributions]\n",
        "\n",
        "  test_sets = [test_tf_dataset, dev_tf_dataset\n",
        "              ] + gen_split_tf_datasets + [gen_tf_dataset]\n",
        "  test_sets_names = [\"test\", \"dev\"] + gen_distributions + [\"gen\"]\n",
        "\n",
        "  return train_tf_dataset, test_sets, test_sets_names, vocabs, max_len\n",
        "\n",
        "\n",
        "def set_up_cogs(parent_encoding, batch_size):\n",
        "  # Convenience wrapper: load all COGS splits from the default DATASET_FILES.\n",
        "  return read_cogs_datasets(DATASET_FILES, parent_encoding, batch_size)\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "gjcFDxENhpzQ"
      },
      "outputs": [],
      "source": [
        "#@title This code block defines Encoder layer, following this Transformer tutorial (adding relative attention, a copy decoder, and a few other improvements): https://www.tensorflow.org/tutorials/text/transformer\n",
        "\n",
        "# Attention types:\n",
        "# RELATIVE* use learned relative-position embeddings (see MultiHeadAttention);\n",
        "# RELATIVE*_BIAS additionally learn a per-offset scalar bias.\n",
        "RELATIVE = \"relative\"\n",
        "RELATIVE_BIAS = \"relative_bias\"\n",
        "RELATIVE8 = \"relative8\"    # NOTE(review): not referenced in this file chunk\n",
        "RELATIVE16 = \"relative16\"  # NOTE(review): not referenced in this file chunk\n",
        "RELATIVE16_BIAS = \"relative16_bias\"  # NOTE(review): not referenced in this file chunk\n",
        "# Classic fixed sin/cos position encodings added to the embeddings.\n",
        "ABSOLUTE_SINUSOIDAL = \"absolute_sinusoidal\"\n",
        "\n",
        "\n",
        "def get_angles(pos, i, d_model):\n",
        "  angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))\n",
        "  return pos * angle_rates\n",
        "\n",
        "\n",
        "def positional_encoding(position, d_model):\n",
        "  \"\"\"Sinusoidal position encodings of shape (1, position, d_model).\"\"\"\n",
        "  angles = get_angles(np.arange(position)[:, np.newaxis],\n",
        "                      np.arange(d_model)[np.newaxis, :],\n",
        "                      d_model)\n",
        "\n",
        "  # Even feature indices (2i) get sin, odd ones (2i+1) get cos.\n",
        "  angles[:, 0::2] = np.sin(angles[:, 0::2])\n",
        "  angles[:, 1::2] = np.cos(angles[:, 1::2])\n",
        "\n",
        "  # Leading batch axis of size 1, as float32 for addition to embeddings.\n",
        "  return tf.cast(angles[np.newaxis, ...], dtype=tf.float32)\n",
        "\n",
        "\n",
        "def create_padding_mask(seq):\n",
        "  \"\"\"Mask that is 1.0 at padding positions (token id 0), else 0.0.\n",
        "\n",
        "  Shaped (batch_size, 1, 1, seq_len) so it broadcasts over the\n",
        "  attention logits.\n",
        "  \"\"\"\n",
        "  pad_positions = tf.cast(tf.math.equal(seq, 0), tf.float32)\n",
        "  return pad_positions[:, tf.newaxis, tf.newaxis, :]\n",
        "\n",
        "\n",
        "def create_look_ahead_mask(size):\n",
        "  \"\"\"(seq_len, seq_len) mask that is 1 above the diagonal (future).\"\"\"\n",
        "  lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)\n",
        "  return 1 - lower_triangle\n",
        "\n",
        "\n",
        "def create_masks(inp):\n",
        "  \"\"\"Build the encoder padding mask for a batch of token ids.\"\"\"\n",
        "  return create_padding_mask(inp)\n",
        "\n",
        "\n",
        "def create_relative_ids(inp_len, relative_radius):\n",
        "  \"\"\"Clipped relative-position id matrix for relative attention.\n",
        "\n",
        "  Entry [i, j] is relative_radius + clip(i - j, -radius, +radius), i.e.\n",
        "  an id in [0, 2 * relative_radius] suitable for an embedding lookup.\n",
        "\n",
        "  Args:\n",
        "    inp_len: sequence length.\n",
        "    relative_radius: maximum absolute relative distance before clipping.\n",
        "\n",
        "  Returns:\n",
        "    Integer tf.constant of shape (inp_len, inp_len).\n",
        "  \"\"\"\n",
        "  positions = np.arange(inp_len)\n",
        "  # Vectorized form of the original O(n^2) Python double loop.\n",
        "  diffs = positions[:, np.newaxis] - positions[np.newaxis, :]\n",
        "  enc_relative_ids = relative_radius + np.clip(\n",
        "      diffs, -relative_radius, relative_radius)\n",
        "  return tf.constant(enc_relative_ids)\n",
        "\n",
        "\n",
        "def scaled_dot_product_relative_attention(q, k, v,\n",
        "                                          mask=None,\n",
        "                                          relative_ids=None,\n",
        "                                          relative_embeddings=None,\n",
        "                                          relative_biases=None):\n",
        "  \"\"\"Calculate the attention weights, optionally with relative positions.\n",
        "\n",
        "  - If relative_ids or relative_embeddings is None, then this is equivalent\n",
        "    to regular scaled_dot_product_attention.\n",
        "  - q, k, v must have matching leading dimensions.\n",
        "  - k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n",
        "  - The mask has different shapes depending on its type (padding or look\n",
        "    ahead) but it must be broadcastable for addition.\n",
        "\n",
        "  Args:\n",
        "    q: query shape == (..., seq_len_q, depth)\n",
        "    k: key shape == (..., seq_len_k, depth)\n",
        "    v: value shape == (..., seq_len_v, depth_v), or None to skip computing\n",
        "      the attended values and return only the attention weights.\n",
        "    mask: float tensor broadcastable to (..., seq_len_q, seq_len_k);\n",
        "      positions where the mask is 1 are suppressed. Defaults to None.\n",
        "    relative_ids: optional (seq_len_q, seq_len_k) tensor of relative\n",
        "      position ids. Defaults to None.\n",
        "    relative_embeddings: optional layer mapping relative ids to per-depth\n",
        "      embeddings. Defaults to None.\n",
        "    relative_biases: optional layer mapping relative ids to a scalar bias\n",
        "      added to the attention logits. Defaults to None.\n",
        "\n",
        "  Returns:\n",
        "    (output, attention_weights); output is None when v is None.\n",
        "  \"\"\"\n",
        "\n",
        "  matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)\n",
        "\n",
        "  if relative_ids is not None:\n",
        "    if relative_embeddings is not None:\n",
        "      # Add the query x relative-position-embedding term to the logits.\n",
        "      r = relative_embeddings(relative_ids)\n",
        "      if len(q.shape) == 4:\n",
        "        # Multi-head case: q is (batch, heads, seq_len_q, depth).\n",
        "        matmul_qrel = tf.einsum(\"bhqd,qkd-\u003ebhqk\", q, r)\n",
        "        matmul_qk += matmul_qrel\n",
        "      elif len(q.shape) == 3:\n",
        "        # Single-head case: q is (batch, seq_len_q, depth).\n",
        "        matmul_qrel = tf.einsum(\"bqd,qkd-\u003ebqk\", q, r)\n",
        "        matmul_qk += matmul_qrel\n",
        "      else:\n",
        "        raise ValueError(f\"Query must have dimension 4 or 3 (only 1 head), but has {len(q.shape)}\")\n",
        "    if relative_biases is not None:\n",
        "      # Per-relative-position scalar bias; squeeze its trailing size-1 axis.\n",
        "      matmul_qk += tf.squeeze(relative_biases(relative_ids), axis=-1)\n",
        "\n",
        "  # scale matmul_qk\n",
        "  dk = tf.cast(tf.shape(k)[-1], tf.float32)\n",
        "  scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n",
        "\n",
        "  # add the mask to the scaled tensor (large negative => ~zero after softmax).\n",
        "  if mask is not None:\n",
        "    scaled_attention_logits += (mask * -1e9)\n",
        "\n",
        "  # softmax is normalized on the last axis (seq_len_k) so that the scores\n",
        "  # add up to 1.\n",
        "  attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)  # (..., seq_len_q, seq_len_k)\n",
        "\n",
        "  if v is not None:\n",
        "    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)\n",
        "    return output, attention_weights\n",
        "  else:\n",
        "    # Caller only wants the attention distribution (e.g. parent prediction).\n",
        "    return None, attention_weights\n",
        "\n",
        "\n",
        "class MultiHeadAttention(tf.keras.layers.Layer):\n",
        "  \"\"\"Multi-head attention, optionally with relative position embeddings.\n",
        "\n",
        "  For RELATIVE / RELATIVE_BIAS position encodings a per-offset embedding\n",
        "  (and, for RELATIVE_BIAS, a per-offset scalar bias) is learned and fed\n",
        "  into scaled_dot_product_relative_attention.\n",
        "  \"\"\"\n",
        "\n",
        "  def __init__(self, d_model, num_heads, relative_radius, position_encodings):\n",
        "    super(MultiHeadAttention, self).__init__()\n",
        "    self.num_heads = num_heads\n",
        "    self.d_model = d_model\n",
        "    # Relative ids live in [0, 2 * relative_radius].\n",
        "    self.relative_vocab_size = relative_radius*2+1\n",
        "\n",
        "    assert d_model % self.num_heads == 0\n",
        "\n",
        "    self.depth = d_model // self.num_heads\n",
        "\n",
        "    self.wq = tf.keras.layers.Dense(d_model)\n",
        "    self.wk = tf.keras.layers.Dense(d_model)\n",
        "    self.wv = tf.keras.layers.Dense(d_model)\n",
        "\n",
        "    # Final projection applied after the heads are concatenated.\n",
        "    self.dense = tf.keras.layers.Dense(d_model)\n",
        "\n",
        "    if (position_encodings == RELATIVE or\n",
        "        position_encodings == RELATIVE_BIAS):\n",
        "      self.relative_embeddings = tf.keras.layers.Embedding(\n",
        "          self.relative_vocab_size, self.depth)\n",
        "    else:\n",
        "      self.relative_embeddings = None\n",
        "    if position_encodings == RELATIVE_BIAS:\n",
        "      self.relative_biases = tf.keras.layers.Embedding(\n",
        "          self.relative_vocab_size, 1)\n",
        "    else:\n",
        "      self.relative_biases = None\n",
        "\n",
        "  def split_heads(self, x, batch_size):\n",
        "    \"\"\"Split the last dimension into (num_heads, depth).\n",
        "    Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n",
        "    \"\"\"\n",
        "    x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n",
        "    return tf.transpose(x, perm=[0, 2, 1, 3])\n",
        "\n",
        "  def call(self, v, k, q, mask=None, relative_ids=None):\n",
        "    \"\"\"Attend over (v, k, q); returns (output, attention weights).\"\"\"\n",
        "    batch_size = tf.shape(q)[0]\n",
        "\n",
        "    q = self.wq(q)  # (batch_size, seq_len, d_model)\n",
        "    k = self.wk(k)  # (batch_size, seq_len, d_model)\n",
        "    v = self.wv(v)  # (batch_size, seq_len, d_model)\n",
        "\n",
        "    q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)\n",
        "    k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)\n",
        "    v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)\n",
        "\n",
        "    # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)\n",
        "    # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)\n",
        "    # NOTE(review): despite the name, `attention_logits` receives the\n",
        "    # softmaxed attention weights returned by the helper.\n",
        "    scaled_attention, attention_logits = scaled_dot_product_relative_attention(\n",
        "        q, k, v, mask, relative_ids, self.relative_embeddings, self.relative_biases)\n",
        "\n",
        "    scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)\n",
        "\n",
        "    concat_attention = tf.reshape(scaled_attention,\n",
        "                                  (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)\n",
        "\n",
        "    output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)\n",
        "\n",
        "    return output, attention_logits\n",
        "\n",
        "\n",
        "def point_wise_feed_forward_network(d_model, dff):\n",
        "  \"\"\"Two-layer position-wise MLP: d_model -\u003e dff (relu) -\u003e d_model.\"\"\"\n",
        "  hidden = tf.keras.layers.Dense(dff, activation=\"relu\")  # (batch_size, seq_len, dff)\n",
        "  projection = tf.keras.layers.Dense(d_model)  # (batch_size, seq_len, d_model)\n",
        "  return tf.keras.Sequential([hidden, projection])\n",
        "\n",
        "\n",
        "class EncoderLayer(tf.keras.layers.Layer):\n",
        "  \"\"\"One Transformer encoder block.\n",
        "\n",
        "  Self-attention followed by a position-wise feed-forward network, each\n",
        "  wrapped in dropout plus a residual connection and layer norm.\n",
        "  \"\"\"\n",
        "\n",
        "  def __init__(self, d_model, num_heads, dff, position_encodings, rate=0.1, relative_radius=8):\n",
        "    super(EncoderLayer, self).__init__()\n",
        "\n",
        "    self.mha = MultiHeadAttention(d_model, num_heads, relative_radius, position_encodings)\n",
        "    self.ffn = point_wise_feed_forward_network(d_model, dff)\n",
        "\n",
        "    self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n",
        "    self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n",
        "\n",
        "    self.dropout1 = tf.keras.layers.Dropout(rate)\n",
        "    self.dropout2 = tf.keras.layers.Dropout(rate)\n",
        "\n",
        "  def call(self, x, training, mask, relative_ids):\n",
        "    # Self-attention sub-layer with residual connection.\n",
        "    attended, _ = self.mha(x, x, x, mask, relative_ids)  # (batch_size, input_seq_len, d_model)\n",
        "    attended = self.dropout1(attended, training=training)\n",
        "    normed = self.layernorm1(x + attended)  # (batch_size, input_seq_len, d_model)\n",
        "\n",
        "    # Position-wise feed-forward sub-layer with residual connection.\n",
        "    transformed = self.dropout2(self.ffn(normed), training=training)\n",
        "    return self.layernorm2(normed + transformed)\n",
        "\n",
        "\n",
        "class Encoder(tf.keras.layers.Layer):\n",
        "  \"\"\"Transformer encoder: token embeddings + position info + N layers.\n",
        "\n",
        "  With shared_layers=True a single EncoderLayer instance (one set of\n",
        "  weights) is reused for every layer of the stack.\n",
        "  \"\"\"\n",
        "\n",
        "  def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,\n",
        "               maximum_position_encoding, rate=0.1, relative_radius=8,\n",
        "               position_encodings=RELATIVE,\n",
        "               shared_layers=False):\n",
        "    super(Encoder, self).__init__()\n",
        "\n",
        "    self.d_model = d_model\n",
        "    self.num_layers = num_layers\n",
        "    self.position_encodings = position_encodings\n",
        "    self.shared_layers = shared_layers\n",
        "\n",
        "    self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)\n",
        "    # Sinusoidal table is only needed for absolute position encodings.\n",
        "    if self.position_encodings == ABSOLUTE_SINUSOIDAL:\n",
        "      self.pos_encoding = positional_encoding(maximum_position_encoding,\n",
        "                                              self.d_model)\n",
        "\n",
        "    if self.shared_layers:\n",
        "      # The SAME layer object is repeated, so weights are shared.\n",
        "      layer = EncoderLayer(d_model, num_heads, dff, position_encodings,\n",
        "                           rate, relative_radius=relative_radius)\n",
        "      self.enc_layers = [layer for _ in range(num_layers)]\n",
        "    else:\n",
        "      self.enc_layers = [EncoderLayer(d_model, num_heads, dff, position_encodings,\n",
        "                                      rate, relative_radius=relative_radius)\n",
        "                         for _ in range(num_layers)]\n",
        "\n",
        "    self.dropout = tf.keras.layers.Dropout(rate)\n",
        "\n",
        "  def call(self, x, training, mask, relative_ids):\n",
        "    \"\"\"Encode token ids x into (batch_size, input_seq_len, d_model).\"\"\"\n",
        "\n",
        "    seq_len = tf.shape(x)[1]\n",
        "\n",
        "    # adding embedding and position encoding.\n",
        "    x = self.embedding(x)  # (batch_size, input_seq_len, d_model)\n",
        "    x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n",
        "    if self.position_encodings == ABSOLUTE_SINUSOIDAL:\n",
        "      # Absolute encodings are added here; relative ids are then unused.\n",
        "      relative_ids = None\n",
        "      x += self.pos_encoding[:, :seq_len, :]\n",
        "\n",
        "    x = self.dropout(x, training=training)\n",
        "\n",
        "    for i in range(self.num_layers):\n",
        "      x = self.enc_layers[i](x, training, mask, relative_ids)\n",
        "\n",
        "    return x  # (batch_size, input_seq_len, d_model)\n",
        "\n",
        "  def detailed_param_count(self):\n",
        "    \"\"\"Print per-component parameter counts for this encoder.\"\"\"\n",
        "    print(f\"  encoder embedding: {self.embedding.count_params()}\")\n",
        "    if self.shared_layers:\n",
        "      print(f\"  encoder layer weights (shared): {self.enc_layers[0].count_params()}\")\n",
        "    else:\n",
        "      print(f\"  encoder layer weights: {self.enc_layers[0].count_params()*len(self.enc_layers)}\")\n",
        "\n",
        "\n",
        "class COGSModel(tf.keras.Model):\n",
        "  def __init__(self, num_layers, d_model, num_heads, dff, vocabs,\n",
        "               pe_input, rate=0.1,\n",
        "               relative_radius=8, position_encodings=RELATIVE,\n",
        "               shared_layers=False,\n",
        "               parent_encoding=None):\n",
        "    \"\"\"Build the shared encoder plus one output head per tagging feature.\n",
        "\n",
        "    Args:\n",
        "      num_layers: number of encoder layers.\n",
        "      d_model: embedding / model width.\n",
        "      num_heads: attention heads per layer.\n",
        "      dff: feed-forward hidden width.\n",
        "      vocabs: [tokens, parent, role, category, noun_type, verb_name].\n",
        "      pe_input: input length (maximum position encoding).\n",
        "      rate: dropout rate.\n",
        "      relative_radius: clipping radius for relative position ids.\n",
        "      position_encodings: RELATIVE / RELATIVE_BIAS / ABSOLUTE_SINUSOIDAL / ...\n",
        "      shared_layers: share one encoder layer's weights across the stack.\n",
        "      parent_encoding: PARENT_ABSOLUTE / PARENT_RELATIVE / PARENT_ATTENTION.\n",
        "\n",
        "    Raises:\n",
        "      ValueError: for an unknown parent_encoding.\n",
        "    \"\"\"\n",
        "    super(COGSModel, self).__init__()\n",
        "    self.pe_input = pe_input # input length\n",
        "    self.tokens_vocab_size = len(vocabs[0])\n",
        "    self.parent_vocab_size = len(vocabs[1])\n",
        "    self.role_vocab_size = len(vocabs[2])\n",
        "    self.category_vocab_size = len(vocabs[3])\n",
        "    self.noun_type_vocab_size = len(vocabs[4])\n",
        "    self.verb_name_vocab_size = len(vocabs[5])\n",
        "\n",
        "    self.position_encodings = position_encodings\n",
        "    self.encoder = Encoder(num_layers, d_model, num_heads, dff,\n",
        "                           self.tokens_vocab_size, pe_input, rate,\n",
        "                           relative_radius=relative_radius,\n",
        "                           position_encodings=position_encodings,\n",
        "                           shared_layers=shared_layers)\n",
        "\n",
        "    self.parent_encoding = parent_encoding\n",
        "    self.relative_vocab_size = relative_radius * 2 + 1\n",
        "    if parent_encoding == PARENT_ABSOLUTE or parent_encoding == PARENT_RELATIVE:\n",
        "      # Parent is predicted as a plain classification over the parent vocab.\n",
        "      self.parent_layer = tf.keras.layers.Dense(self.parent_vocab_size)\n",
        "    elif parent_encoding == PARENT_ATTENTION:\n",
        "      # Note: ABSOLUTE_SINUSOIDAL should also work with parent attention, since\n",
        "      # the positional embeddings are added in the encoder.\n",
        "      self.parent_layer_query = tf.keras.layers.Dense(d_model)\n",
        "      self.parent_layer_key = tf.keras.layers.Dense(d_model)\n",
        "      if position_encodings == RELATIVE or position_encodings == RELATIVE_BIAS:\n",
        "        self.parent_relative_embeddings = tf.keras.layers.Embedding(\n",
        "            self.relative_vocab_size, d_model)\n",
        "      else:\n",
        "        self.parent_relative_embeddings = None\n",
        "      if position_encodings == RELATIVE_BIAS:\n",
        "        self.parent_relative_biases = tf.keras.layers.Embedding(\n",
        "            self.relative_vocab_size, 1)\n",
        "      else:\n",
        "        self.parent_relative_biases = None\n",
        "    else:\n",
        "      raise ValueError(f\"Undefined parent_encoding: {parent_encoding}\")\n",
        "    self.role_layer = tf.keras.layers.Dense(self.role_vocab_size)\n",
        "    self.category_layer = tf.keras.layers.Dense(self.category_vocab_size)\n",
        "    self.noun_type_layer = tf.keras.layers.Dense(self.noun_type_vocab_size)\n",
        "    self.verb_name_layer = tf.keras.layers.Dense(self.verb_name_vocab_size)\n",
        "\n",
        "  def call(self, inp, parent_ids, training, enc_padding_mask, enc_relative_ids):\n",
        "    \"\"\"Run the encoder and every tagging head.\n",
        "\n",
        "    Args:\n",
        "      inp: (batch, inp_seq_len) int token ids.\n",
        "      parent_ids: input padding mask; not referenced in this method's body.\n",
        "      training: dropout flag forwarded to the encoder.\n",
        "      enc_padding_mask: (batch, 1, 1, inp_seq_len) encoder padding mask.\n",
        "      enc_relative_ids: relative position id matrix for relative attention.\n",
        "\n",
        "    Returns:\n",
        "      (parent, role, category, noun_type, verb_name) per-token\n",
        "      probability distributions.\n",
        "\n",
        "    Raises:\n",
        "      ValueError: for an unknown parent_encoding.\n",
        "    \"\"\"\n",
        "\n",
        "    enc_output = self.encoder(\n",
        "        inp, training, enc_padding_mask,\n",
        "        enc_relative_ids)  # (batch_size, inp_seq_len, d_model)\n",
        "\n",
        "    if self.parent_encoding == PARENT_ABSOLUTE or self.parent_encoding == PARENT_RELATIVE:\n",
        "      parent_output = tf.nn.softmax(self.parent_layer(enc_output), axis=-1)\n",
        "    elif self.parent_encoding == PARENT_ATTENTION:\n",
        "      parent_query = self.parent_layer_query(enc_output)\n",
        "      parent_key = self.parent_layer_key(enc_output)\n",
        "      # Drop the heads axis so the helper runs in its single-head (3-D) form.\n",
        "      enc_padding_mask_noheads = tf.squeeze(enc_padding_mask, axis=1)\n",
        "      _, attention = scaled_dot_product_relative_attention(\n",
        "          parent_query,\n",
        "          parent_key,\n",
        "          None,  # No values needed here.\n",
        "          mask=enc_padding_mask_noheads,\n",
        "          relative_ids=enc_relative_ids,\n",
        "          relative_embeddings=self.parent_relative_embeddings,\n",
        "          relative_biases=self.parent_relative_biases)\n",
        "      # attention.shape == batch, inp_seq_len, inp_seq_len\n",
        "      # The parent vocab has the padding entry at index 0 followed by the\n",
        "      # sorted token indices, so rolling the attention columns right by one\n",
        "      # aligns column j with parent-vocab id j + 1 (the last column wraps\n",
        "      # into the padding slot).\n",
        "      parent_output = tf.roll(attention, shift=1, axis=-1)\n",
        "    else:\n",
        "      raise ValueError(f\"Undefined parent_encoding: {self.parent_encoding}\")\n",
        "\n",
        "    role_output = tf.nn.softmax(self.role_layer(enc_output), axis=-1)\n",
        "    category_output = tf.nn.softmax(self.category_layer(enc_output), axis=-1)\n",
        "    noun_type_output = tf.nn.softmax(self.noun_type_layer(enc_output), axis=-1)\n",
        "    verb_name_output = tf.nn.softmax(self.verb_name_layer(enc_output), axis=-1)\n",
        "\n",
        "    return parent_output, role_output, category_output, noun_type_output, verb_name_output\n",
        "\n",
        "  def detailed_param_count(self):\n",
        "    \"\"\"Prints a per-layer breakdown of trainable parameter counts.\n",
        "\n",
        "    Mirrors the layer setup in __init__: the parent head depends on\n",
        "    parent_encoding, while the four tagging heads (role, category,\n",
        "    noun type, verb name) are always reported.\n",
        "\n",
        "    Raises:\n",
        "      ValueError: if self.parent_encoding is not a known encoding.\n",
        "    \"\"\"\n",
        "    print(\"Transformer parameter counts:\")\n",
        "    self.encoder.detailed_param_count()\n",
        "    if self.parent_encoding == PARENT_ABSOLUTE or self.parent_encoding == PARENT_RELATIVE:\n",
        "      # Fixed: this branch used to print role_layer (which is also printed\n",
        "      # unconditionally below); the parent head is what differs here.\n",
        "      print(f\"  parent_layer: {self.parent_layer.count_params()}\")\n",
        "    elif self.parent_encoding == PARENT_ATTENTION:\n",
        "      print(f\"  parent_layer_query: {self.parent_layer_query.count_params()}\")\n",
        "      print(f\"  parent_layer_key: {self.parent_layer_key.count_params()}\")\n",
        "      print(\"  ... possibly also relative embedding and biases\")\n",
        "    else:\n",
        "      # Fixed: the ValueError was constructed but never raised.\n",
        "      raise ValueError(f\"Undefined parent_encoding: {self.parent_encoding}\")\n",
        "    print(f\"  role_layer: {self.role_layer.count_params()}\")\n",
        "    print(f\"  category_layer: {self.category_layer.count_params()}\")\n",
        "    print(f\"  noun_type_layer: {self.noun_type_layer.count_params()}\")\n",
        "    print(f\"  verb_name_layer: {self.verb_name_layer.count_params()}\")\n",
        "\n",
        "\n",
        "class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n",
        "  \"\"\"Inverse-sqrt learning-rate schedule with linear warmup.\n",
        "\n",
        "  The rate rises linearly for `warmup_steps` steps, then decays with the\n",
        "  inverse square root of the step count, all scaled by 1/sqrt(d_model).\n",
        "  \"\"\"\n",
        "\n",
        "  def __init__(self, d_model, warmup_steps=4000):\n",
        "    super().__init__()\n",
        "    # Cast once up front so rsqrt below operates on a float32 tensor.\n",
        "    self.d_model = tf.cast(d_model, tf.float32)\n",
        "    self.warmup_steps = warmup_steps\n",
        "\n",
        "  def __call__(self, step):\n",
        "    decay_term = tf.math.rsqrt(step)\n",
        "    warmup_term = step * (self.warmup_steps ** -1.5)\n",
        "    return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay_term, warmup_term)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "lenjYYMafjDt"
      },
      "outputs": [],
      "source": [
        "#@title Experiment execution function\n",
        "\n",
        "def run_evaluation(dataset,\n",
        "                   num_layers, d_model, dff, num_heads, position_encoding,\n",
        "                   parent_encoding,\n",
        "                   share_layer_weights,\n",
        "                   num_epochs,\n",
        "                   num_repetitions=1,\n",
        "                   save_results_every_n_epochs=None,\n",
        "                   batch_size=64):\n",
        "  \"\"\"Trains and evaluates the COGS tagging model over several repetitions.\n",
        "\n",
        "  Args:\n",
        "    dataset: must be \"cogs_tag\".\n",
        "    num_layers, d_model, dff, num_heads: transformer size hyperparameters.\n",
        "    position_encoding: one of the RELATIVE*/ABSOLUTE_SINUSOIDAL constants.\n",
        "    parent_encoding: one of the PARENT_* constants.\n",
        "    share_layer_weights: whether encoder layers share weights.\n",
        "    num_epochs: training epochs per repetition.\n",
        "    num_repetitions: independent training runs to average over.\n",
        "    save_results_every_n_epochs: detailed-evaluation cadence; defaults to\n",
        "      evaluating only after the final epoch.\n",
        "    batch_size: examples per batch.\n",
        "\n",
        "  Returns:\n",
        "    (averages, transformer, test_sets_names) where averages is a list of\n",
        "    per-epoch metric rows [epoch, acc_token_0, acc_seq_0, ...] averaged\n",
        "    over repetitions, and transformer is the last trained model.\n",
        "  \"\"\"\n",
        "  assert dataset == \"cogs_tag\"\n",
        "  dataset_train, dataset_val_list, test_sets_names, vocabs, max_len = set_up_cogs(parent_encoding, batch_size)\n",
        "\n",
        "  if save_results_every_n_epochs is None:\n",
        "    save_results_every_n_epochs = num_epochs\n",
        "\n",
        "  # Map the user-facing position encoding constant onto the internal\n",
        "  # (encoding, relative_radius) pair used by the model.\n",
        "  if position_encoding == RELATIVE8:\n",
        "    position_encoding = RELATIVE\n",
        "    relative_radius = 8\n",
        "  elif position_encoding == RELATIVE16:\n",
        "    position_encoding = RELATIVE\n",
        "    relative_radius = 16\n",
        "  elif position_encoding == RELATIVE16_BIAS:\n",
        "    position_encoding = RELATIVE_BIAS\n",
        "    relative_radius = 16\n",
        "  elif position_encoding == ABSOLUTE_SINUSOIDAL:\n",
        "    position_encoding = ABSOLUTE_SINUSOIDAL\n",
        "    relative_radius = 0\n",
        "  else:\n",
        "    raise ValueError(f\"Undefined position embeddings type: {position_encoding}\")\n",
        "\n",
        "  dropout_rate = 0.1\n",
        "\n",
        "  repetition_metrics_l = []\n",
        "  for repetition in range(num_repetitions):\n",
        "    print(f\"Starting repetition {repetition} ...\\n\")\n",
        "\n",
        "    learning_rate = CustomSchedule(d_model)\n",
        "    optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,\n",
        "                                         epsilon=1e-9)\n",
        "    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n",
        "        from_logits=False, reduction=\"none\")\n",
        "    train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n",
        "    train_accuracy = tf.keras.metrics.Mean(name=\"train_accuracy\")\n",
        "    # NOTE(review): these eval metrics reuse the names \"train_loss\" /\n",
        "    # \"train_accuracy\". Harmless since the names are only labels here, but\n",
        "    # presumably they were meant to be \"eval_loss\" / \"eval_accuracy\".\n",
        "    eval_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n",
        "    eval_accuracy = tf.keras.metrics.Mean(name=\"train_accuracy\")\n",
        "\n",
        "    transformer = COGSModel(\n",
        "        num_layers,\n",
        "        d_model,\n",
        "        num_heads,\n",
        "        dff,\n",
        "        vocabs,\n",
        "        pe_input=max_len,\n",
        "        rate=dropout_rate,\n",
        "        relative_radius=relative_radius,\n",
        "        position_encodings=position_encoding,\n",
        "        shared_layers=share_layer_weights,\n",
        "        parent_encoding=parent_encoding)\n",
        "\n",
        "    def loss_function(real, pred):\n",
        "      \"\"\"Masked mean cross-entropy; padding labels (0) are excluded.\"\"\"\n",
        "      per_token = loss_object(real, pred)\n",
        "      keep = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)),\n",
        "                     dtype=per_token.dtype)\n",
        "      return tf.reduce_sum(per_token * keep) / tf.reduce_sum(keep)\n",
        "\n",
        "    def accuracy_function(real, pred):\n",
        "      \"\"\"Masked mean token accuracy; padding labels (0) are excluded.\"\"\"\n",
        "      per_token = tf.keras.metrics.sparse_categorical_accuracy(real, pred)\n",
        "      keep = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)),\n",
        "                     dtype=per_token.dtype)\n",
        "      return tf.reduce_sum(per_token * keep) / tf.reduce_sum(keep)\n",
        "\n",
        "    # Shared input signature for train_step and eval_step: all seven\n",
        "    # inputs are int32 tensors of shape (batch, seq_len).\n",
        "    train_step_signature = [\n",
        "        tf.TensorSpec(shape=(None, None), dtype=tf.int32),\n",
        "        tf.TensorSpec(shape=(None, None), dtype=tf.int32),\n",
        "        tf.TensorSpec(shape=(None, None), dtype=tf.int32),\n",
        "        tf.TensorSpec(shape=(None, None), dtype=tf.int32),\n",
        "        tf.TensorSpec(shape=(None, None), dtype=tf.int32),\n",
        "        tf.TensorSpec(shape=(None, None), dtype=tf.int32),\n",
        "        tf.TensorSpec(shape=(None, None), dtype=tf.int32)\n",
        "    ]\n",
        "\n",
        "    @tf.function(input_signature=train_step_signature)\n",
        "    def train_step(inp, parent_ids, targ_parent, targ_role, targ_category, targ_noun_type, targ_verb_name):\n",
        "      # One optimizer step: forward pass with training=True, loss/accuracy\n",
        "      # accumulated over the five tagging heads, then a gradient update.\n",
        "      enc_padding_mask = create_masks(inp)\n",
        "      enc_relative_ids = create_relative_ids(max_len, relative_radius)\n",
        "      with tf.GradientTape() as tape:\n",
        "        (parent_predictions,\n",
        "         role_predictions,\n",
        "         category_predictions,\n",
        "         noun_type_predictions,\n",
        "         verb_name_predictions) = transformer(inp, parent_ids, True,\n",
        "                                              enc_padding_mask,\n",
        "                                              enc_relative_ids)\n",
        "        loss = loss_function(targ_parent, parent_predictions)\n",
        "        accuracy = accuracy_function(targ_parent, parent_predictions)\n",
        "        loss += loss_function(targ_role, role_predictions)\n",
        "        accuracy += accuracy_function(targ_role, role_predictions)\n",
        "        loss += loss_function(targ_category, category_predictions)\n",
        "        accuracy += accuracy_function(targ_category, category_predictions)\n",
        "        loss += loss_function(targ_noun_type, noun_type_predictions)\n",
        "        accuracy += accuracy_function(targ_noun_type, noun_type_predictions)\n",
        "        loss += loss_function(targ_verb_name, verb_name_predictions)\n",
        "        accuracy += accuracy_function(targ_verb_name, verb_name_predictions)\n",
        "        # Average over the five heads so magnitudes match a single head.\n",
        "        loss /= 5\n",
        "        accuracy /= 5\n",
        "\n",
        "      gradients = tape.gradient(loss, transformer.trainable_variables)\n",
        "      optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))\n",
        "      train_loss(loss)\n",
        "      train_accuracy(accuracy)\n",
        "\n",
        "    @tf.function(input_signature=train_step_signature)\n",
        "    def eval_step(inp, parent_ids, targ_parent, targ_role, targ_category, targ_noun_type, targ_verb_name):\n",
        "      \"\"\"Accumulates eval loss/accuracy for one batch; no gradient update.\n",
        "\n",
        "      Mirrors train_step but runs the model with training=False. The\n",
        "      original wrapped the forward pass in an unused GradientTape, which\n",
        "      only recorded operations and wasted memory; it has been removed.\n",
        "      \"\"\"\n",
        "      enc_padding_mask = create_masks(inp)\n",
        "      enc_relative_ids = create_relative_ids(max_len, relative_radius)\n",
        "      (parent_predictions,\n",
        "       role_predictions,\n",
        "       category_predictions,\n",
        "       noun_type_predictions,\n",
        "       verb_name_predictions) = transformer(inp, parent_ids, False,\n",
        "                                            enc_padding_mask,\n",
        "                                            enc_relative_ids)\n",
        "      loss = loss_function(targ_parent, parent_predictions)\n",
        "      accuracy = accuracy_function(targ_parent, parent_predictions)\n",
        "      loss += loss_function(targ_role, role_predictions)\n",
        "      accuracy += accuracy_function(targ_role, role_predictions)\n",
        "      loss += loss_function(targ_category, category_predictions)\n",
        "      accuracy += accuracy_function(targ_category, category_predictions)\n",
        "      loss += loss_function(targ_noun_type, noun_type_predictions)\n",
        "      accuracy += accuracy_function(targ_noun_type, noun_type_predictions)\n",
        "      loss += loss_function(targ_verb_name, verb_name_predictions)\n",
        "      accuracy += accuracy_function(targ_verb_name, verb_name_predictions)\n",
        "      # Average over the five heads so magnitudes match a single head.\n",
        "      loss /= 5\n",
        "      accuracy /= 5\n",
        "      eval_loss(loss)\n",
        "      eval_accuracy(accuracy)\n",
        "\n",
        "    def eval_step_detailed(inp, parent_ids, targ_parent, targ_role, targ_category, targ_noun_type, targ_verb_name, max_to_show,\n",
        "                           vocabs):\n",
        "      \"\"\"Evaluates one batch; prints up to max_to_show mispredicted examples.\n",
        "\n",
        "      Returns:\n",
        "        (token_accuracy, sequence_accuracy, per_head_sequence_accuracy)\n",
        "        where the last item is a 5-list in the order\n",
        "        [parent, role, category, noun type, verb name].\n",
        "      \"\"\"\n",
        "      enc_padding_mask = create_masks(inp)\n",
        "      enc_relative_ids = create_relative_ids(max_len, relative_radius)\n",
        "      # Evaluation only: no GradientTape needed (the original recorded one\n",
        "      # and discarded it, wasting memory).\n",
        "      (parent_predictions,\n",
        "       role_predictions,\n",
        "       category_predictions,\n",
        "       noun_type_predictions,\n",
        "       verb_name_predictions) = transformer(inp, parent_ids, False,\n",
        "                                            enc_padding_mask,\n",
        "                                            enc_relative_ids)\n",
        "\n",
        "      predicted_targ_parent = tf.argmax(parent_predictions, 2).numpy()\n",
        "      predicted_targ_role = tf.argmax(role_predictions, 2).numpy()\n",
        "      predicted_targ_category = tf.argmax(category_predictions, 2).numpy()\n",
        "      predicted_targ_noun_type = tf.argmax(noun_type_predictions, 2).numpy()\n",
        "      predicted_targ_verb_name = tf.argmax(verb_name_predictions, 2).numpy()\n",
        "\n",
        "      accuracy_seq_detailed = [0, 0, 0, 0, 0]\n",
        "      accuracy_seq = 0\n",
        "      accuracy_tok = 0\n",
        "      total_tok = 0\n",
        "      shown = 0\n",
        "      parent_ground_truth = targ_parent.numpy()\n",
        "      role_ground_truth = targ_role.numpy()\n",
        "      category_ground_truth = targ_category.numpy()\n",
        "      noun_type_ground_truth = targ_noun_type.numpy()\n",
        "      verb_name_ground_truth = targ_verb_name.numpy()\n",
        "      for i in range(batch_size):\n",
        "        # clear out the padding predictions:\n",
        "        for j in range(len(parent_ground_truth[i])):\n",
        "          if role_ground_truth[i][j] == 0:\n",
        "            predicted_targ_parent[i][j] = 0\n",
        "            predicted_targ_role[i][j] = 0\n",
        "            predicted_targ_category[i][j] = 0\n",
        "            predicted_targ_noun_type[i][j] = 0\n",
        "            predicted_targ_verb_name[i][j] = 0\n",
        "        # Token-level accuracy over non-padding positions, all heads pooled.\n",
        "        for k in range(len(parent_ground_truth[i])):\n",
        "          if parent_ground_truth[i][k] != 0:\n",
        "            if predicted_targ_parent[i][k] == parent_ground_truth[i][k]:\n",
        "              accuracy_tok += 1\n",
        "            if predicted_targ_role[i][k] == role_ground_truth[i][k]:\n",
        "              accuracy_tok += 1\n",
        "            if predicted_targ_category[i][k] == category_ground_truth[i][k]:\n",
        "              accuracy_tok += 1\n",
        "            if predicted_targ_noun_type[i][k] == noun_type_ground_truth[i][k]:\n",
        "              accuracy_tok += 1\n",
        "            if predicted_targ_verb_name[i][k] == verb_name_ground_truth[i][k]:\n",
        "              accuracy_tok += 1\n",
        "            total_tok += 5\n",
        "        # Sequence-level accuracy, per head and joint.\n",
        "        if (predicted_targ_parent[i] == parent_ground_truth[i]).all():\n",
        "          accuracy_seq_detailed[0] += 1\n",
        "        if (predicted_targ_role[i] == role_ground_truth[i]).all():\n",
        "          accuracy_seq_detailed[1] += 1\n",
        "        if (predicted_targ_category[i] == category_ground_truth[i]).all():\n",
        "          accuracy_seq_detailed[2] += 1\n",
        "        if (predicted_targ_noun_type[i] == noun_type_ground_truth[i]).all():\n",
        "          accuracy_seq_detailed[3] += 1\n",
        "        if (predicted_targ_verb_name[i] == verb_name_ground_truth[i]).all():\n",
        "          accuracy_seq_detailed[4] += 1\n",
        "        if ((predicted_targ_parent[i] == parent_ground_truth[i]).all() and\n",
        "            (predicted_targ_role[i] == role_ground_truth[i]).all() and\n",
        "            (predicted_targ_category[i] == category_ground_truth[i]).all() and\n",
        "            (predicted_targ_noun_type[i] == noun_type_ground_truth[i]).all() and\n",
        "            (predicted_targ_verb_name[i] == verb_name_ground_truth[i]).all()):\n",
        "          accuracy_seq += 1\n",
        "        else:\n",
        "          if shown < max_to_show:\n",
        "            print(\"--------------\")\n",
        "            print(\"tokens:       \" + decode(inp.numpy()[i], vocabs[0]))\n",
        "            print(\"parent gt:    \" + decode(parent_ground_truth[i], vocabs[1]))\n",
        "            print(\"role gt:      \" + decode(role_ground_truth[i], vocabs[2]))\n",
        "            print(\"category gt:  \" + decode(category_ground_truth[i], vocabs[3]))\n",
        "            print(\"noun type gt: \" + decode(noun_type_ground_truth[i], vocabs[4]))\n",
        "            print(\"verb name gt: \" + decode(verb_name_ground_truth[i], vocabs[5]))\n",
        "            print(\"parent:       \" + decode(predicted_targ_parent[i], vocabs[1]))\n",
        "            print(\"role:         \" + decode(predicted_targ_role[i], vocabs[2]))\n",
        "            print(\"category:     \" + decode(predicted_targ_category[i], vocabs[3]))\n",
        "            print(\"noun type:    \" + decode(predicted_targ_noun_type[i], vocabs[4]))\n",
        "            print(\"verb name:    \" + decode(predicted_targ_verb_name[i], vocabs[5]))\n",
        "            shown += 1\n",
        "      for i in range(5):\n",
        "        accuracy_seq_detailed[i] /= float(batch_size)\n",
        "      return accuracy_tok / float(total_tok), accuracy_seq / float(batch_size), accuracy_seq_detailed\n",
        "\n",
        "    def evaluate_in_set(dataset_val, vocabs):\n",
        "      \"\"\"Runs detailed evaluation over a whole dataset and prints summaries.\n",
        "\n",
        "      Returns:\n",
        "        (mean_token_accuracy, mean_sequence_accuracy) averaged over batches.\n",
        "      \"\"\"\n",
        "      steps_per_epoch_val = dataset_val.cardinality()\n",
        "      n_test_batches = 0\n",
        "      test_accuracy_token = 0\n",
        "      test_accuracy_seq = 0\n",
        "      test_accuracy_seq_detailed = [0, 0, 0, 0, 0]\n",
        "      for (batch, (inp, parent_ids,\n",
        "                   targ_parent, targ_role,\n",
        "                   targ_category, targ_noun_type,\n",
        "                   targ_verb_name)) in enumerate(dataset_val.take(steps_per_epoch_val)):\n",
        "        # Fixed: the original had byte-identical `if batch == 0` / `else`\n",
        "        # branches here; a single code path does the same thing.\n",
        "        batch_accuracy_token, batch_accuracy_seq, batch_accuracy_seq_detailed = eval_step_detailed(\n",
        "            inp, parent_ids, targ_parent, targ_role,\n",
        "            targ_category, targ_noun_type,\n",
        "            targ_verb_name, 0, vocabs)\n",
        "        test_accuracy_token += batch_accuracy_token\n",
        "        test_accuracy_seq += batch_accuracy_seq\n",
        "        for i in range(5):\n",
        "          test_accuracy_seq_detailed[i] += batch_accuracy_seq_detailed[i]\n",
        "        n_test_batches += 1\n",
        "\n",
        "      print(\n",
        "          f\"Eval accuracy (token level): {test_accuracy_token/n_test_batches}\")\n",
        "      print(\n",
        "          f\"Eval accuracy (sequence level): {test_accuracy_seq/n_test_batches}\")\n",
        "      names = [\"parent\", \"role\", \"category\", \"noun type\", \"verb name\"]\n",
        "      for i in range(5):\n",
        "        print(f\"Eval accuracy (sequence level: {names[i]}): {test_accuracy_seq_detailed[i]/n_test_batches}\")\n",
        "      print(\"\")\n",
        "      return (test_accuracy_token/n_test_batches, test_accuracy_seq/n_test_batches)\n",
        "\n",
        "    steps_per_epoch = dataset_train.cardinality()\n",
        "    print(f\"steps_per_epoch: {steps_per_epoch}\")\n",
        "    repetition_metrics = []\n",
        "    for epoch in range(num_epochs):\n",
        "      steps_per_epoch_val = dataset_val_list[0].cardinality()\n",
        "      start = time.time()\n",
        "\n",
        "      train_loss.reset_states()\n",
        "      train_accuracy.reset_states()\n",
        "      eval_loss.reset_states()\n",
        "      eval_accuracy.reset_states()\n",
        "\n",
        "      # Training pass over the full training set.\n",
        "      for (batch, (inp, parent_ids,\n",
        "                   targ_parent, targ_role,\n",
        "                   targ_category, targ_noun_type,\n",
        "                   targ_verb_name)) in enumerate(dataset_train.take(steps_per_epoch)):\n",
        "        # print(\"inp:\" + str(inp.shape))\n",
        "        # print(\"targ:\" + str(targ.shape))\n",
        "        train_step(inp, parent_ids, targ_parent, targ_role,\n",
        "                   targ_category, targ_noun_type,\n",
        "                   targ_verb_name)\n",
        "        if batch % 100 == 0:\n",
        "          print(\"Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}\".format(\n",
        "              epoch + 1, batch, train_loss.result(), train_accuracy.result()))\n",
        "      print(\"Epoch {} Loss {:.4f} Accuracy {:.4f}\".format(\n",
        "          epoch + 1, train_loss.result(), train_accuracy.result()))\n",
        "      # Quick loss/accuracy pass on the first validation set only.\n",
        "      for (batch, (inp, parent_ids, targ_parent, targ_role, targ_category,\n",
        "                   targ_noun_type, targ_verb_name)) in enumerate(\n",
        "                       dataset_val_list[0].take(steps_per_epoch_val)):\n",
        "        eval_step(inp, parent_ids, targ_parent, targ_role, targ_category,\n",
        "                  targ_noun_type, targ_verb_name)\n",
        "      print(\"Epoch {} Eval Loss {:.4f} Eval Accuracy {:.4f}\".format(\n",
        "          epoch + 1, eval_loss.result(), eval_accuracy.result()))\n",
        "\n",
        "      print(\"Time taken for 1 epoch: {} secs\\n\".format(time.time() - start))\n",
        "\n",
        "      # Detailed evaluation on every validation/test set at the configured\n",
        "      # cadence; each row is [epoch, acc_token_0, acc_seq_0, ...].\n",
        "      if ((epoch + 1) % save_results_every_n_epochs) == 0:\n",
        "        epoch_metrics = [epoch + 1]\n",
        "        for i in range(len(dataset_val_list)):\n",
        "          print(f\"------- Evaluation in dataset_val {i} -------\")\n",
        "          (acc_token, acc_seq) = evaluate_in_set(dataset_val_list[i], vocabs)\n",
        "          epoch_metrics.append(acc_token)\n",
        "          epoch_metrics.append(acc_seq)\n",
        "        repetition_metrics.append(epoch_metrics)\n",
        "\n",
        "    # Make sure we save the last epoch results if it is not a\n",
        "    # multiple of save_results_every_n_epochs:\n",
        "    if (num_epochs % save_results_every_n_epochs) != 0:\n",
        "      epoch_metrics = [num_epochs]\n",
        "      for i in range(len(dataset_val_list)):\n",
        "        print(f\"------- Evaluation in dataset_val {i} -------\")\n",
        "        (acc_token, acc_seq) = evaluate_in_set(dataset_val_list[i], vocabs)\n",
        "        epoch_metrics.append(acc_token)\n",
        "        epoch_metrics.append(acc_seq)\n",
        "      repetition_metrics.append(epoch_metrics)\n",
        "\n",
        "    repetition_metrics_l.append(repetition_metrics)\n",
        "\n",
        "  print(\"Raw repetition metrics:\")\n",
        "  for repetition_metrics in repetition_metrics_l:\n",
        "    for epoch_metrics in repetition_metrics:\n",
        "      print(epoch_metrics)\n",
        "\n",
        "  # Average metrics across repetitions, skipping the epoch number column.\n",
        "  # NOTE(review): `averages` aliases repetition_metrics_l[0], so the first\n",
        "  # repetition's raw rows are mutated in place below (after being printed).\n",
        "  averages = repetition_metrics_l[0]\n",
        "  for i in range(1, len(repetition_metrics_l)):\n",
        "    for j in range(len(repetition_metrics_l[i])):\n",
        "      epoch_metrics = repetition_metrics_l[i][j]\n",
        "      # skip the epoch number:\n",
        "      for k in range(1, len(epoch_metrics)):\n",
        "        averages[j][k] += epoch_metrics[k]\n",
        "  for j in range(len(averages)):\n",
        "    for k in range(1, len(averages[j])):\n",
        "      averages[j][k] /= len(repetition_metrics_l)\n",
        "\n",
        "  print(\"Average repetition metrics:\")\n",
        "  for average_metrics in averages:\n",
        "    print(\"\\t\".join(map(str, average_metrics)))\n",
        "\n",
        "  print(f\"Transformer params: {transformer.count_params()}\")\n",
        "  transformer.detailed_param_count()\n",
        "\n",
        "  return averages, transformer, test_sets_names\n",
        "\n",
        "\n",
        "def pretty_print_results(result, test_sets_names):\n",
        "  \"\"\"Print table of results twice: once space aligned, once comma separated.\n",
        "\n",
        "  results is a list of numbers:\n",
        "  [epoch, pairs of (acc_token, acc_seq) for each of the 24 test sets]\n",
        "  \"\"\"\n",
        "  assert len(result) == 2*len(test_sets_names) + 1\n",
        "  print(\"%-20s  acc_token  acc_seq\"%(\"\"))\n",
        "  for i in range(len(test_sets_names)):\n",
        "    print(\"%-20s  %.3f      %.3f\"%(test_sets_names[i][:20], result[1+2*i], result[1+2*i+1]))\n",
        "  print()\n",
        "  print(\",acc_token,acc_seq\")\n",
        "  for i in range(len(test_sets_names)):\n",
        "    print(\"%s,%.3f,%.3f\"%(test_sets_names[i][:20], result[1+2*i], result[1+2*i+1]))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "IDtYYUDRKCfM"
      },
      "outputs": [],
      "source": [
        "#@title Experiment - base\n",
        "\n",
        "# Run the different experiments:\n",
        "# parameters are:\n",
        "# - dataset\n",
        "# - num_layers, d_model, dff, num_heads\n",
        "# - position_encoding, parent_encoding, share_layer_weights\n",
        "# - num_epochs\n",
        "# - optional keyword parameters:\n",
        "#     num_repetitions=1,\n",
        "#     save_results_every_n_epochs=None,\n",
        "#     batch_size=64\n",
        "\n",
        "# Example:\n",
        "# Baseline configuration: absolute sinusoidal positions, absolute\n",
        "# parent encoding, no shared layer weights; 16 epochs.\n",
        "results, transformer, test_sets_names = run_evaluation(\n",
        "    'cogs_tag',\n",
        "    2, 64, 256, 4,\n",
        "    ABSOLUTE_SINUSOIDAL, PARENT_ABSOLUTE, False,\n",
        "    16,\n",
        "    num_repetitions=1, save_results_every_n_epochs=None, batch_size=64)\n",
        "\n",
        "# results[-1] is the metrics row of the last saved epoch.\n",
        "pretty_print_results(results[-1], test_sets_names)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "fSxVYkhxLmNX"
      },
      "outputs": [],
      "source": [
        "#@title Experiment - tuned\n",
        "\n",
        "# Tuned configuration: relative position encodings with bias (radius 16),\n",
        "# attention-based parent encoding, shared layer weights; 16 epochs.\n",
        "results, transformer, test_sets_names = run_evaluation(\n",
        "    'cogs_tag',\n",
        "    2, 64, 256, 4,\n",
        "    RELATIVE16_BIAS, PARENT_ATTENTION, True,\n",
        "    16,\n",
        "    num_repetitions=1, save_results_every_n_epochs=None, batch_size=64)\n",
        "\n",
        "# results[-1] is the metrics row of the last saved epoch.\n",
        "pretty_print_results(results[-1], test_sets_names)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "last_runtime": {
        "build_target": "//learning/deepmind/public/tools/ml_python:ml_notebook",
        "kind": "private"
      },
      "name": "cogs_tagging.ipynb",
      "provenance": [
        {
          "file_id": "1yoke9fHsL6eHFq3MmVRoq82hpo7xHv1v",
          "timestamp": 1601564673821
        },
        {
          "file_id": "1I41hx_VfIBxmuXygVEwlZ277A5HHp6ze",
          "timestamp": 1600681566885
        }
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
