{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "QAFFN.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "TPU"
  },
  "cells": [
    {
      "metadata": {
        "id": "fY65v3Khb_IB",
        "colab_type": "code",
        "outputId": "b53e4e6e-4db6-4cd9-be21-88b80c0af056",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "cell_type": "code",
      "source": [
        "# Mount Google Drive so the QA csv data under /content/gdrive is readable.\n",
        "from google.colab import drive\n",
        "drive.mount('/content/gdrive')"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "ciH0K7i2cMVU",
        "colab_type": "code",
        "outputId": "613cde27-97b6-436a-d9f2-073daa0bd1d3",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 377
        }
      },
      "cell_type": "code",
      "source": [
        "# install tf 2.0\n",
        "from __future__ import absolute_import, division, print_function, unicode_literals\n",
        "\n",
        "# NOTE(review): `%pip install` would guarantee the install targets this\n",
        "# kernel's environment; `!pip` is kept as-is for the original Colab runtime.\n",
        "!pip install tensorflow-gpu==2.0.0-alpha0\n",
        "import tensorflow as tf\n",
        "\n",
        "print(tf.__version__)"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Requirement already satisfied: tensorflow-gpu==2.0.0-alpha0 in /usr/local/lib/python3.6/dist-packages (2.0.0a0)\n",
            "Requirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.2.2)\n",
            "Requirement already satisfied: google-pasta>=0.1.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.1.5)\n",
            "Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.7.1)\n",
            "Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.0.9)\n",
            "Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.11.0)\n",
            "Requirement already satisfied: tb-nightly<1.14.0a20190302,>=1.14.0a20190301 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.14.0a20190301)\n",
            "Requirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.16.2)\n",
            "Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.0.7)\n",
            "Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (3.7.1)\n",
            "Requirement already satisfied: tf-estimator-nightly<1.14.0.dev2019030116,>=1.14.0.dev2019030115 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.14.0.dev2019030115)\n",
            "Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.1.0)\n",
            "Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.15.0)\n",
            "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.33.1)\n",
            "Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.7.1)\n",
            "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.14.0a20190302,>=1.14.0a20190301->tensorflow-gpu==2.0.0-alpha0) (0.15.2)\n",
            "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.14.0a20190302,>=1.14.0a20190301->tensorflow-gpu==2.0.0-alpha0) (3.1)\n",
            "Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.6->tensorflow-gpu==2.0.0-alpha0) (2.8.0)\n",
            "Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow-gpu==2.0.0-alpha0) (40.9.0)\n",
            "2.0.0-alpha0\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "LNgiSBHJcP81",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# Shared imports: filesystem paths, csv loading, train/eval splitting,\n",
        "# numerics, and the Keras backend (used later to set the learning phase).\n",
        "import os\n",
        "import pandas as pd\n",
        "from sklearn.model_selection import train_test_split\n",
        "import numpy as np\n",
        "import tensorflow.keras.backend as K"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "0RwAUbbBcTBk",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# create dataset\n",
        "\n",
        "def _parse_bert_vector(text):\n",
        "    \"\"\"Parse a stringified embedding like '[[0.1 0.2 ...]]' into a 1-D float array.\n",
        "\n",
        "    Replaces the deprecated np.fromstring(..., sep=' ') text parsing that was\n",
        "    duplicated for the question and answer columns.\n",
        "    \"\"\"\n",
        "    return np.array(text.replace('[[', '').replace(']]', '').split(), dtype=np.float64)\n",
        "\n",
        "def create_generator_for_ffn(\n",
        "        data_dir,\n",
        "        file_list=None,\n",
        "        mode='train'):\n",
        "    \"\"\"Yield dicts of question/answer embedding vectors from QA csv files.\n",
        "\n",
        "    Arguments:\n",
        "        data_dir {str} -- directory holding the csv files\n",
        "\n",
        "    Keyword Arguments:\n",
        "        file_list {list} -- csv file names to read; None uses the default\n",
        "            four sources (default: {None})\n",
        "        mode {str} -- 'train' also yields a constant 'labels': 1 entry and\n",
        "            takes the 80% split; anything else takes the 20% split\n",
        "\n",
        "    Raises:\n",
        "        FileNotFoundError -- when a listed csv is missing from data_dir\n",
        "    \"\"\"\n",
        "    # None sentinel instead of a mutable list default (shared across calls).\n",
        "    # NOTE(review): this default omits healthtapQAs.csv while\n",
        "    # create_dataset_for_ffn's default includes it -- confirm intended.\n",
        "    if file_list is None:\n",
        "        file_list = [\n",
        "            \"ehealthforumQAs.csv\",\n",
        "            \"icliniqQAs.csv\",\n",
        "            \"questionDoctorQAs.csv\",\n",
        "            \"webmdQAs.csv\"]\n",
        "\n",
        "    for file_name in file_list:\n",
        "        full_file_path = os.path.join(data_dir, file_name)\n",
        "        if not os.path.exists(full_file_path):\n",
        "            raise FileNotFoundError(\"File %s not found\" % full_file_path)\n",
        "        df = pd.read_csv(full_file_path)\n",
        "\n",
        "        # Fixed random_state keeps the train/eval partitions disjoint; the\n",
        "        # original re-split randomly on every call, so eval rows could leak\n",
        "        # into training across mode='train' / mode='eval' invocations.\n",
        "        if mode == 'train':\n",
        "            df, _ = train_test_split(df, test_size=0.2, random_state=42)\n",
        "        else:\n",
        "            _, df = train_test_split(df, test_size=0.2, random_state=42)\n",
        "\n",
        "        for _, row in df.iterrows():\n",
        "            q_vectors = _parse_bert_vector(row.question_bert)\n",
        "            a_vectors = _parse_bert_vector(row.answer_bert)\n",
        "            if mode == 'train':\n",
        "                yield {\n",
        "                    \"q_vectors\": q_vectors,\n",
        "                    \"a_vectors\": a_vectors,\n",
        "                    \"labels\": 1\n",
        "                }\n",
        "            else:\n",
        "                yield {\n",
        "                    \"q_vectors\": q_vectors,\n",
        "                    \"a_vectors\": a_vectors,\n",
        "                }\n",
        "\n",
        "def create_dataset_for_ffn(\n",
        "        data_dir,\n",
        "        file_list=None,\n",
        "        mode='train',\n",
        "        hidden_size=768,\n",
        "        shuffle_buffer=10000,\n",
        "        prefetch=128,\n",
        "        batch_size=32):\n",
        "    \"\"\"Build a batched tf.data.Dataset of BERT-embedded QA pairs.\n",
        "\n",
        "    Train mode additionally carries an int32 'labels' tensor and shuffles.\n",
        "\n",
        "    Keyword Arguments:\n",
        "        file_list {list} -- csv names; None uses the default five sources\n",
        "        hidden_size {int} -- embedding width declared to tf.data (default: {768})\n",
        "        shuffle_buffer {int} -- shuffle buffer size in train mode (default: {10000})\n",
        "        prefetch {int} -- number of batches to prefetch (default: {128})\n",
        "        batch_size {int} -- examples per batch (default: {32})\n",
        "    \"\"\"\n",
        "    if file_list is None:\n",
        "        file_list = [\n",
        "            \"ehealthforumQAs.csv\",\n",
        "            \"icliniqQAs.csv\",\n",
        "            \"questionDoctorQAs.csv\",\n",
        "            \"webmdQAs.csv\",\n",
        "            \"healthtapQAs.csv\"]\n",
        "\n",
        "    def gen(): return create_generator_for_ffn(\n",
        "        data_dir=data_dir,\n",
        "        file_list=file_list,\n",
        "        mode=mode)\n",
        "\n",
        "    output_types = {\n",
        "        'q_vectors': tf.float32,\n",
        "        'a_vectors': tf.float32\n",
        "    }\n",
        "\n",
        "    output_shapes = {\n",
        "        'q_vectors': [hidden_size],\n",
        "        'a_vectors': [hidden_size],\n",
        "    }\n",
        "\n",
        "    if mode == 'train':\n",
        "        output_types.update({'labels': tf.int32})\n",
        "        output_shapes.update({'labels': []})\n",
        "\n",
        "    dataset = tf.data.Dataset.from_generator(\n",
        "        generator=gen,\n",
        "        output_types=output_types,\n",
        "        output_shapes=output_shapes\n",
        "    )\n",
        "    if mode == 'train':\n",
        "        dataset = dataset.shuffle(shuffle_buffer)\n",
        "\n",
        "    # Batch first, then prefetch, so whole batches are staged ahead of the\n",
        "    # training loop (tf.data performance guidance); the original prefetched\n",
        "    # individual elements before batching.\n",
        "    dataset = dataset.batch(batch_size)\n",
        "    dataset = dataset.prefetch(prefetch)\n",
        "    return dataset"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "eeDSqHAJglm-",
        "colab_type": "code",
        "outputId": "caa02c6d-43c0-43dd-d8cc-c902a85879b9",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 275
        }
      },
      "cell_type": "code",
      "source": [
        "# get bert embedded dataset\n",
        "# Builds the batched tf.data pipeline from the csv files on Drive.\n",
        "d = create_dataset_for_ffn(data_dir='/content/gdrive/My Drive/mqa-biobert', batch_size=64)"
      ],
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "WARNING: Logging before flag parsing goes to stderr.\n",
            "W0417 05:52:08.727642 139820326467456 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/dataset_ops.py:410: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "tf.py_func is deprecated in TF V2. Instead, there are two\n",
            "    options available in V2.\n",
            "    - tf.py_function takes a python function which manipulates tf eager\n",
            "    tensors instead of numpy arrays. It's easy to convert a tf eager tensor to\n",
            "    an ndarray (just call tensor.numpy()) but having access to eager tensors\n",
            "    means `tf.py_function`s can use accelerators such as GPUs as well as\n",
            "    being differentiable using a gradient tape.\n",
            "    - tf.numpy_function maintains the semantics of the deprecated tf.py_func\n",
            "    (it is not differentiable, and manipulates numpy arrays). It drops the\n",
            "    stateful argument making all functions stateful.\n",
            "    \n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "metadata": {
        "id": "24e4mzkehHBO",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# QA pair ffn layer\n",
        "\n",
        "\n",
        "class QAFFN(tf.keras.layers.Layer):\n",
        "    def __init__(\n",
        "            self,\n",
        "            hidden_size=768,\n",
        "            dropout=0.1,\n",
        "            residual=True,\n",
        "            activation=tf.keras.layers.ReLU(),\n",
        "            name='QAFFN'):\n",
        "        \"\"\"Feed-forward projection layers for question and answer embeddings.\n",
        "\n",
        "        The input to this layer is a two-element tuple\n",
        "        (q_embedding, a_embedding); each element may be None or a tensor.\n",
        "\n",
        "        In training, pass both question and answer embeddings.\n",
        "        In pre-inference, pass the answer embedding only and save the output.\n",
        "        In inference, pass the question embedding only and run a vector\n",
        "        similarity search against the saved answer embeddings.\n",
        "\n",
        "        Keyword Arguments:\n",
        "            hidden_size {int} -- hidden size of feed-forward network (default: {768})\n",
        "            dropout {float} -- dropout rate; <= 0 disables dropout (default: {0.1})\n",
        "            residual {bool} -- whether to use residual connection (default: {True})\n",
        "            activation {[type]} -- activation function (default: {tf.keras.layers.ReLU()})\n",
        "        \"\"\"\n",
        "\n",
        "        super(QAFFN, self).__init__(name=name)\n",
        "        self.hidden_size = hidden_size\n",
        "        self.dropout = dropout\n",
        "        self.residual = residual\n",
        "        self.activation = activation\n",
        "        self.q_ffn = tf.keras.layers.Dense(\n",
        "            units=hidden_size,\n",
        "            use_bias=True,\n",
        "            activation=activation\n",
        "        )\n",
        "\n",
        "        self.a_ffn = tf.keras.layers.Dense(\n",
        "            units=hidden_size,\n",
        "            use_bias=True,\n",
        "            activation=activation\n",
        "        )\n",
        "        # Create the dropout layer once; the original constructed a fresh\n",
        "        # tf.keras.layers.Dropout on every forward pass inside the\n",
        "        # tf.function, which Keras cannot track as a sublayer.\n",
        "        self.dropout_layer = tf.keras.layers.Dropout(dropout) if dropout > 0 else None\n",
        "        self.q_ffn.build([1, self.hidden_size])\n",
        "        self.a_ffn.build([1, self.hidden_size])\n",
        "\n",
        "    @tf.function\n",
        "    def _bert_to_ffn(self, bert_embedding, ffn_layer):\n",
        "        \"\"\"Project one side through its FFN with dropout and optional residual.\n",
        "\n",
        "        A None input passes through unchanged so an absent side is a no-op.\n",
        "        \"\"\"\n",
        "        if bert_embedding is None:\n",
        "            return None\n",
        "        ffn_embedding = ffn_layer(bert_embedding)\n",
        "        if self.dropout_layer is not None:\n",
        "            ffn_embedding = self.dropout_layer(ffn_embedding)\n",
        "        if self.residual:\n",
        "            try:\n",
        "                ffn_embedding += bert_embedding\n",
        "            except Exception as exc:  # narrowed from bare except: don't swallow KeyboardInterrupt\n",
        "                raise ValueError('Incompatible shape for res connection, got {0}, {1}'.format(\n",
        "                    ffn_embedding.shape, bert_embedding.shape)) from exc\n",
        "        return ffn_embedding\n",
        "\n",
        "    def call(self, inputs):\n",
        "        \"\"\"inputs: (q_embedding, a_embedding); returns the projected pair.\"\"\"\n",
        "        q_bert_embedding, a_bert_embedding = inputs\n",
        "        q_ffn_embedding = self._bert_to_ffn(q_bert_embedding, self.q_ffn)\n",
        "        a_ffn_embedding = self._bert_to_ffn(a_bert_embedding, self.a_ffn)\n",
        "        return q_ffn_embedding, a_ffn_embedding\n",
        "\n",
        "\n",
        "@tf.function\n",
        "def qa_pair_loss(q_embedding, a_embedding):\n",
        "    \"\"\"In-batch pair loss: diagonal of the q.aT similarity matrix is positive.\n",
        "\n",
        "    Returns a float tensor loss, or 0.0 when either embedding is absent.\n",
        "    \"\"\"\n",
        "    if q_embedding is None or a_embedding is None:\n",
        "        # Float tensor instead of the original Python int 0, so the no-op\n",
        "        # branch has a dtype consistent with the real loss branch.\n",
        "        return tf.constant(0.0)\n",
        "    q_embedding = q_embedding / \\\n",
        "        tf.norm(q_embedding, axis=-1, keepdims=True)\n",
        "    a_embedding = a_embedding / \\\n",
        "        tf.norm(a_embedding, axis=-1, keepdims=True)\n",
        "    similarity_vector = tf.reshape(\n",
        "        tf.matmul(q_embedding, a_embedding, transpose_b=True), [-1, ])\n",
        "    # tf.shape handles dynamic batch sizes inside tf.function, where the\n",
        "    # static q_embedding.shape[0] can be None (e.g. a partial last batch).\n",
        "    target = tf.reshape(tf.eye(tf.shape(q_embedding)[0]), [-1, ])\n",
        "    # NOTE(review): cosine similarity can be negative while\n",
        "    # binary_crossentropy expects predictions in [0, 1] -- confirm intended.\n",
        "    loss = tf.keras.losses.binary_crossentropy(target, similarity_vector)\n",
        "    return loss\n",
        "\n",
        "\n",
        "class MedicalQAModel(tf.keras.Model):\n",
        "    \"\"\"Keras model wrapping QAFFN; attaches the in-batch pair loss via add_loss.\"\"\"\n",
        "\n",
        "    def __init__(self, name=''):\n",
        "        super(MedicalQAModel, self).__init__(name=name)\n",
        "        self.qa_ffn_layer = QAFFN()\n",
        "\n",
        "    def call(self, inputs):\n",
        "        # inputs is a dict; either vector may be absent (dict.get -> None).\n",
        "        embeddings = (inputs.get('q_vectors'), inputs.get('a_vectors'))\n",
        "        self.add_loss(qa_pair_loss(*embeddings))\n",
        "        return self.qa_ffn_layer(embeddings)\n",
        "\n",
        "def fake_loss(y_true, y_pred):\n",
        "    \"\"\"Constant-zero stand-in loss; ignores y_true and y_pred.\"\"\"\n",
        "    return 0.0"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "ScjdlvR-kj95",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# Instantiate the model and a default-configured Adam optimizer.\n",
        "medical_qa_model = MedicalQAModel()\n",
        "optimizer = tf.keras.optimizers.Adam()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "a3HHnd_dKU2a",
        "colab_type": "code",
        "outputId": "f92a2937-2471-4b83-d378-95562cba7a28",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 3913
        }
      },
      "cell_type": "code",
      "source": [
        "epochs=10\n",
        "loss_metric = tf.keras.metrics.Mean()\n",
        "# NOTE(review): set_learning_phase is a global, deprecated-in-TF2 switch;\n",
        "# it is what keeps the Dropout layers active here -- confirm before removing.\n",
        "K.set_learning_phase(1)\n",
        "\n",
        "# Iterate over epochs.\n",
        "for epoch in range(epochs):\n",
        "  print('Start of epoch %d' % (epoch,))\n",
        "  # Reset the running mean so each epoch reports its own loss; without this\n",
        "  # the printed \"mean loss\" keeps averaging over every previous epoch.\n",
        "  loss_metric.reset_states()\n",
        "\n",
        "  # Iterate over the batches of the dataset.\n",
        "  for step, x_batch_train in enumerate(d):\n",
        "    with tf.GradientTape() as tape:\n",
        "      q_embedding, a_embedding = medical_qa_model(x_batch_train)\n",
        "      # Compute the in-batch pair loss on the projected embeddings.\n",
        "      loss = qa_pair_loss(q_embedding, a_embedding)\n",
        "\n",
        "    grads = tape.gradient(loss, medical_qa_model.trainable_variables)\n",
        "    optimizer.apply_gradients(zip(grads, medical_qa_model.trainable_variables))\n",
        "\n",
        "    loss_metric(loss)\n",
        "\n",
        "    if step % 100 == 0:\n",
        "      print('step %s: mean loss = %s' % (step, loss_metric.result()))\n"
      ],
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Start of epoch 0\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "W0417 05:52:46.946611 139820326467456 tf_logging.py:161] Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "WARNING: Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "W0417 05:52:47.323804 139820326467456 tf_logging.py:161] Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "WARNING: Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n",
            "step 0: mean loss = tf.Tensor(1.1809258, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.15270387, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.11724205, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.10493506, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.09857195, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.09459527, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.09184392, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.08978085, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.08816196, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.0868286, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.0857477, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.08482037, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.083998494, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.08327168, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.08260603, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.082012184, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.081461184, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.08095804, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.080477305, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.08004208, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.07964912, shape=(), dtype=float32)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "W0417 05:56:58.200734 139820326467456 tf_logging.py:161] Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n",
            "W0417 05:56:58.393811 139820326467456 tf_logging.py:161] Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "WARNING: Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n",
            "WARNING: Entity <bound method QAFFN._bert_to_ffn of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f29f59fed68>> could not be transformed and will be staged without change. Error details can be found in the logs when running with the env variable AUTOGRAPH_VERBOSITY >= 1. Please report this to the AutoGraph team. Cause: KeyError during conversion: LIVE_VARS_IN\n",
            "Start of epoch 1\n",
            "step 0: mean loss = tf.Tensor(0.08096242, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.08075694, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.08046189, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.08019418, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.07988028, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.07955853, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.07924241, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.07894271, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.078649156, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07837875, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.07810703, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.077862464, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.07763252, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.07741066, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.077188246, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07698909, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.076790966, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.07660215, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.07641547, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.076231614, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.07605362, shape=(), dtype=float32)\n",
            "Start of epoch 2\n",
            "step 0: mean loss = tf.Tensor(0.0767932, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.07679051, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.07670503, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.076581396, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.07643332, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.07633478, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.076216266, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.07608671, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07599512, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07588143, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.0757649, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.07565895, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.07554434, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.0754274, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.07530572, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07519139, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.075078666, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.07496844, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.074857675, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.0747596, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.07465533, shape=(), dtype=float32)\n",
            "Start of epoch 3\n",
            "step 0: mean loss = tf.Tensor(0.07533289, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.07538568, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.0753596, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.07530852, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.07523888, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.07516227, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.0750814, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.074998826, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07492359, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07484382, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.07476335, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.074681096, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.07459555, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.07451727, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.07443832, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.074355766, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.074278854, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.07420379, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.0741254, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.074064806, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.073993355, shape=(), dtype=float32)\n",
            "Start of epoch 4\n",
            "step 0: mean loss = tf.Tensor(0.07424095, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.07422554, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.07418529, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.074128, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.07406247, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.07399374, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.07392472, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.07385809, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07379762, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07376659, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.07371596, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.07365778, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.073600285, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.07354255, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.07348821, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07343268, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.07337533, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.07331689, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.07325946, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.07320299, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.073145285, shape=(), dtype=float32)\n",
            "Start of epoch 5\n",
            "step 0: mean loss = tf.Tensor(0.07343255, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.07344019, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.073412985, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.07337382, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.073323466, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.07326907, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.07321252, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.07315961, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07310472, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07305504, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.073003314, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.07294859, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.07289676, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.07284937, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.07280471, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07275206, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.072701514, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.072651446, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.072603144, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.07256548, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.07251801, shape=(), dtype=float32)\n",
            "Start of epoch 6\n",
            "step 0: mean loss = tf.Tensor(0.072761916, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.07275936, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.07273368, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.072698906, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.07267, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.07264605, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.0726126, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.072574, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07254777, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07251778, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.07248365, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.072446436, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.07240679, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.0723703, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.07233111, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07229371, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.07225527, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.0722224, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.07220871, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.07217993, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.07214867, shape=(), dtype=float32)\n",
            "Start of epoch 7\n",
            "step 0: mean loss = tf.Tensor(0.07229304, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.072290584, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.07227475, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.07224908, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.072214596, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.072178796, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.07214183, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.07210659, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07206949, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07203327, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.07199547, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.07195838, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.07192171, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.07188477, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.071848474, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07181277, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.071798995, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.07177824, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.071751125, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.071725495, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.07169543, shape=(), dtype=float32)\n",
            "Start of epoch 8\n",
            "step 0: mean loss = tf.Tensor(0.07184127, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.071840726, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.071825184, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.071801975, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.07177034, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.071738675, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.07170704, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.07167711, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07164542, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07161485, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.071612395, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.07159293, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.071568444, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.07154118, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.07151357, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07148645, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.07145749, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.07142849, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.071400195, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.07137055, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.07134155, shape=(), dtype=float32)\n",
            "Start of epoch 9\n",
            "step 0: mean loss = tf.Tensor(0.07149163, shape=(), dtype=float32)\n",
            "step 100: mean loss = tf.Tensor(0.07149103, shape=(), dtype=float32)\n",
            "step 200: mean loss = tf.Tensor(0.07148063, shape=(), dtype=float32)\n",
            "step 300: mean loss = tf.Tensor(0.071471795, shape=(), dtype=float32)\n",
            "step 400: mean loss = tf.Tensor(0.071448654, shape=(), dtype=float32)\n",
            "step 500: mean loss = tf.Tensor(0.07142249, shape=(), dtype=float32)\n",
            "step 600: mean loss = tf.Tensor(0.07139559, shape=(), dtype=float32)\n",
            "step 700: mean loss = tf.Tensor(0.071366325, shape=(), dtype=float32)\n",
            "step 800: mean loss = tf.Tensor(0.07133722, shape=(), dtype=float32)\n",
            "step 900: mean loss = tf.Tensor(0.07130738, shape=(), dtype=float32)\n",
            "step 1000: mean loss = tf.Tensor(0.07127884, shape=(), dtype=float32)\n",
            "step 1100: mean loss = tf.Tensor(0.07126032, shape=(), dtype=float32)\n",
            "step 1200: mean loss = tf.Tensor(0.07123693, shape=(), dtype=float32)\n",
            "step 1300: mean loss = tf.Tensor(0.07121137, shape=(), dtype=float32)\n",
            "step 1400: mean loss = tf.Tensor(0.07119681, shape=(), dtype=float32)\n",
            "step 1500: mean loss = tf.Tensor(0.07117412, shape=(), dtype=float32)\n",
            "step 1600: mean loss = tf.Tensor(0.07115018, shape=(), dtype=float32)\n",
            "step 1700: mean loss = tf.Tensor(0.071126126, shape=(), dtype=float32)\n",
            "step 1800: mean loss = tf.Tensor(0.07110159, shape=(), dtype=float32)\n",
            "step 1900: mean loss = tf.Tensor(0.071076654, shape=(), dtype=float32)\n",
            "step 2000: mean loss = tf.Tensor(0.071051896, shape=(), dtype=float32)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "b01aGHbUKmiK",
        "colab_type": "code",
        "outputId": "d2fb05c6-fa88-4924-b816-eaace33bb90b",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 68
        }
      },
      "cell_type": "code",
      "source": [
        "# Sanity-check embedding quality on ONE training batch.\n",
        "# Put Keras layers in inference mode (disables dropout etc.) before the forward pass.\n",
        "K.set_learning_phase(0)\n",
        "# Forward a single batch from the training dataset `d` through the model;\n",
        "# presumably returns (question_embeddings, answer_embeddings) -- model defined earlier in the notebook.\n",
        "q_embedding, a_embedding = medical_qa_model(next(iter(d)))\n",
        "\n",
        "# L2-normalise along the feature axis so the dot products below are cosine similarities.\n",
        "q_embedding = q_embedding / tf.norm(q_embedding, axis=-1, keepdims=True)\n",
        "a_embedding = a_embedding / tf.norm(a_embedding, axis=-1, keepdims=True)\n",
        "\n",
        "# Cosine similarity of each MATCHED (question, answer) pair -- shape (batch,).\n",
        "batch_score = tf.reduce_sum(q_embedding*a_embedding, axis=-1)\n",
        "# Mean similarity over ALL question x answer pairings (mostly mismatched):\n",
        "# a random-pair baseline the matched score should beat.\n",
        "baseline_score = tf.reduce_mean(tf.matmul(q_embedding,tf.transpose(a_embedding)))\n",
        "\n",
        "print('Training Batch Cos similarity')\n",
        "print(tf.reduce_mean(batch_score))\n",
        "print('Baseline: {0}'.format(baseline_score))"
      ],
      "execution_count": 12,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Training Batch Cos similarity\n",
            "tf.Tensor(0.038819928, shape=(), dtype=float32)\n",
            "Baseline: 0.017509445548057556\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "PDHSYOZbRiPS",
        "colab_type": "code",
        "outputId": "e9933c94-eca5-46b5-9bd7-a6cd9ee44017",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 68
        }
      },
      "cell_type": "code",
      "source": [
        "# Same matched-vs-baseline cosine check as the training-batch cell, but on held-out data.\n",
        "# NOTE(review): duplicates the previous cell almost verbatim -- a shared helper\n",
        "# function taking the dataset would avoid the copy-paste.\n",
        "# NOTE(review): absolute Colab/Drive path -- will break outside this environment.\n",
        "eval_d = create_dataset_for_ffn(data_dir='/content/gdrive/My Drive/mqa-biobert', mode='eval', batch_size=64)\n",
        "# Forward one eval batch; presumably returns (question_embeddings, answer_embeddings).\n",
        "q_embedding, a_embedding = medical_qa_model(next(iter(eval_d)))\n",
        "\n",
        "# L2-normalise along the feature axis so dot products are cosine similarities.\n",
        "q_embedding = q_embedding / tf.norm(q_embedding, axis=-1, keepdims=True)\n",
        "a_embedding = a_embedding / tf.norm(a_embedding, axis=-1, keepdims=True)\n",
        "\n",
        "# Cosine similarity of each matched (question, answer) pair.\n",
        "batch_score = tf.reduce_sum(q_embedding*a_embedding, axis=-1)\n",
        "# Mean over all pairings: random-pair baseline for comparison.\n",
        "baseline_score = tf.reduce_mean(tf.matmul(q_embedding,tf.transpose(a_embedding)))\n",
        "\n",
        "print('Eval Batch Cos similarity')\n",
        "print(tf.reduce_mean(batch_score))\n",
        "print('Baseline: {0}'.format(baseline_score))"
      ],
      "execution_count": 13,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Eval Batch Cos similarity\n",
            "tf.Tensor(0.032733116, shape=(), dtype=float32)\n",
            "Baseline: 0.01878243125975132\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "JRuK18Jo1TlP",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "2a7e51cb-2a5d-42a5-e7cc-ff007aaaab60"
      },
      "cell_type": "code",
      "source": [
        ""
      ],
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[<tf.Tensor: id=2190117, shape=(), dtype=float32, numpy=2.8632064>]"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 11
        }
      ]
    }
  ]
}