{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "TextToBertEmbedToFFNNEmbed.ipynb",
      "version": "0.3.2",
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "metadata": {
        "id": "1ihG5F0brAnN",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# Instructions\n",
        "\n",
        "# Open this and the other notebook (BERTServiceForFFNNConversions.ipynb)\n",
        "\n",
        "# make sure both notebooks are on GPU mode. Make sure they are both connected, the top right of the notebook should NOT read 'busy' or 'connecting'. \n",
        "#Note: These notebooks won't work in CPU or TPU mode (perhaps there's a workaround)\n",
        "\n",
        "#Run the first line of code in this Notebook, do not run anything else. Bert As a Service is not compatible with TF 2.0, so do not intall TF2.0 yet. \n",
        "\n",
        "#Run all the other lines of BERTServiceForFFNNConversions.ipynb\n",
        "\n",
        "#Wait 10 seconds after Bert as a service is running. The output of the last line will say something like \"I:VENTILATOR:[__i:_ru:163]:all set, ready to serve request!\"\n",
        "\n",
        "#Run all other lines of this notebook. Some how installing TF 2.0 after bert service is running will not interfere with it. "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "QKQlXhkY05Cf",
        "colab_type": "code",
        "outputId": "8a7e2989-91c4-490a-a750-cededa63185c",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "cell_type": "code",
      "source": [
        "from google.colab import drive\n",
        "drive.mount('/content/gdrive')"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "-qnnCMivkHnj",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import os"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "31JNZtf1o8Hs",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#DO NOT INSTALL TF 2.0 UNTIL BERT SERVICE IS RUNNING"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "GKYTdVN3zThA",
        "colab_type": "code",
        "outputId": "8a0ac75e-7be1-48a6-ad94-10a48b1a17c3",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 541
        }
      },
      "cell_type": "code",
      "source": [
        "import numpy as np\n",
        "# install tf 2.0\n",
        "from __future__ import absolute_import, division, print_function, unicode_literals\n",
        "\n",
        "#DO NOT INSTALL TF 2.0 UNTIL BERT SERVICE IS RUNNING\n",
        "!pip install tensorflow-gpu==2.0.0-alpha0\n",
        "import tensorflow as tf\n",
        "# tf.compat.v1.disable_v2_behavior()\n",
        "\n",
        "print(tf.__version__)\n",
        "import requests\n"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Collecting tensorflow-gpu==2.0.0-alpha0\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/1a/66/32cffad095253219d53f6b6c2a436637bbe45ac4e7be0244557210dc3918/tensorflow_gpu-2.0.0a0-cp36-cp36m-manylinux1_x86_64.whl (332.1MB)\n",
            "\u001b[K    100% |████████████████████████████████| 332.1MB 47kB/s \n",
            "\u001b[?25hRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.12.0)\n",
            "Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (3.7.1)\n",
            "Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.0.7)\n",
            "Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.7.1)\n",
            "Collecting tb-nightly<1.14.0a20190302,>=1.14.0a20190301 (from tensorflow-gpu==2.0.0-alpha0)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/a9/51/aa1d756644bf4624c03844115e4ac4058eff77acd786b26315f051a4b195/tb_nightly-1.14.0a20190301-py3-none-any.whl (3.0MB)\n",
            "\u001b[K    100% |████████████████████████████████| 3.0MB 5.5MB/s \n",
            "\u001b[?25hRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.33.1)\n",
            "Requirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.2.2)\n",
            "Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.15.0)\n",
            "Requirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.16.3)\n",
            "Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.0.9)\n",
            "Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (0.7.1)\n",
            "Collecting tf-estimator-nightly<1.14.0.dev2019030116,>=1.14.0.dev2019030115 (from tensorflow-gpu==2.0.0-alpha0)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/13/82/f16063b4eed210dc2ab057930ac1da4fbe1e91b7b051a6c8370b401e6ae7/tf_estimator_nightly-1.14.0.dev2019030115-py2.py3-none-any.whl (411kB)\n",
            "\u001b[K    100% |████████████████████████████████| 419kB 6.8MB/s \n",
            "\u001b[?25hCollecting google-pasta>=0.1.2 (from tensorflow-gpu==2.0.0-alpha0)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/64/bb/f1bbc131d6294baa6085a222d29abadd012696b73dcbf8cf1bf56b9f082a/google_pasta-0.1.5-py3-none-any.whl (51kB)\n",
            "\u001b[K    100% |████████████████████████████████| 61kB 28.2MB/s \n",
            "\u001b[?25hRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==2.0.0-alpha0) (1.1.0)\n",
            "Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow-gpu==2.0.0-alpha0) (40.9.0)\n",
            "Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.6->tensorflow-gpu==2.0.0-alpha0) (2.8.0)\n",
            "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.14.0a20190302,>=1.14.0a20190301->tensorflow-gpu==2.0.0-alpha0) (0.15.2)\n",
            "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<1.14.0a20190302,>=1.14.0a20190301->tensorflow-gpu==2.0.0-alpha0) (3.1)\n",
            "Installing collected packages: tb-nightly, tf-estimator-nightly, google-pasta, tensorflow-gpu\n",
            "Successfully installed google-pasta-0.1.5 tb-nightly-1.14.0a20190301 tensorflow-gpu-2.0.0a0 tf-estimator-nightly-1.14.0.dev2019030115\n",
            "2.0.0-alpha0\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "WOiv_13Kzdj2",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "import os\n",
        "from glob import glob\n",
        "import numpy as np\n",
        "from sklearn.model_selection import train_test_split\n",
        "\n",
        "SEED = 42\n",
        "\n",
        "\n",
        "def _float_list_feature(value):\n",
        "    \"\"\"Returns a float_list from a float / double.\"\"\"\n",
        "    return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n",
        "\n",
        "\n",
        "def _int64_list_feature(value):\n",
        "    \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n",
        "    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n",
        "\n",
        "\n",
        "def _int64_feature(value):\n",
        "    \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n",
        "    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n",
        "\n",
        "\n",
        "def create_generator_for_ffn(\n",
        "        file_list,\n",
        "        mode='train'):\n",
        "\n",
        "    # file_list = glob(os.path.join(data_dir, '*.csv'))\n",
        "\n",
        "    for full_file_path in file_list:\n",
        "        # full_file_path = os.path.join(data_dir, file_name)\n",
        "        if not os.path.exists(full_file_path):\n",
        "            raise FileNotFoundError(\"File %s not found\" % full_file_path)\n",
        "        df = pd.read_csv(full_file_path, encoding='utf8')\n",
        "\n",
        "        # so train test split\n",
        "        if mode == 'train':\n",
        "            df, _ = train_test_split(df, test_size=0.2, random_state=SEED)\n",
        "        else:\n",
        "            _, df = train_test_split(df, test_size=0.2, random_state=SEED)\n",
        "\n",
        "        for _, row in df.iterrows():\n",
        "            q_vectors = np.fromstring(row.question_bert.replace(\n",
        "                '[[', '').replace(']]', ''), sep=' ')\n",
        "            a_vectors = np.fromstring(row.answer_bert.replace(\n",
        "                '[[', '').replace(']]', ''), sep=' ')\n",
        "            vectors = np.stack([q_vectors, a_vectors], axis=0)\n",
        "            if mode in ['train', 'eval']:\n",
        "                print(vectors)                                      #added \n",
        "                yield vectors, 1\n",
        "            else:\n",
        "                print(vectors)                                        #added \n",
        "                yield vectors\n",
        "\n",
        "\n",
        "def ffn_serialize_fn(features):\n",
        "    features_tuple = {'features': _float_list_feature(\n",
        "        features[0].flatten()), 'labels': _int64_feature(features[1])}\n",
        "    example_proto = tf.train.Example(\n",
        "        features=tf.train.Features(feature=features_tuple))\n",
        "    return example_proto.SerializeToString()\n",
        "\n",
        "\n",
        "def make_tfrecord(data_dir, generator_fn, serialize_fn, suffix='', **kwargs):\n",
        "    \"\"\"Function to make TF Records from csv files\n",
        "    This function will take all csv files in data_dir, convert them\n",
        "    to tf example and write to *_{suffix}_train/eval.tfrecord to data_dir.\n",
        "\n",
        "    Arguments:\n",
        "        data_dir {str} -- dir that has csv files and store tf record\n",
        "        generator_fn {fn} -- A function that takes a list of filepath and yield the\n",
        "        parsed recored from file.\n",
        "        serialize_fn {fn} -- A function that takes output of generator fn and convert to tf example\n",
        "\n",
        "    Keyword Arguments:\n",
        "        suffix {str} -- suffix to add to tf record files (default: {''})\n",
        "    \"\"\"\n",
        "    file_list = glob(os.path.join(data_dir, '*.csv'))\n",
        "    train_tf_record_file_list = [\n",
        "        f.replace('.csv', '_{0}_train.tfrecord'.format(suffix)) for f in file_list]\n",
        "    test_tf_record_file_list = [\n",
        "        f.replace('.csv', '_{0}_eval.tfrecord'.format(suffix)) for f in file_list]\n",
        "    for full_file_path, train_tf_record_file_path, test_tf_record_file_path in zip(file_list, train_tf_record_file_list, test_tf_record_file_list):\n",
        "        print('Converting file {0} to TF Record'.format(full_file_path))\n",
        "        with tf.io.TFRecordWriter(train_tf_record_file_path) as writer:\n",
        "            for features in generator_fn([full_file_path], mode='train', **kwargs):\n",
        "                example = serialize_fn(features)\n",
        "                writer.write(example)\n",
        "        with tf.io.TFRecordWriter(test_tf_record_file_path) as writer:\n",
        "            for features in generator_fn([full_file_path], mode='eval', **kwargs):\n",
        "                example = serialize_fn(features)\n",
        "                writer.write(example)\n",
        "\n",
        "\n",
        "def create_dataset_for_ffn(\n",
        "        data_dir,\n",
        "        mode='train',\n",
        "        hidden_size=768,\n",
        "        shuffle_buffer=10000,\n",
        "        prefetch=10000,\n",
        "        batch_size=32):\n",
        "\n",
        "    tfrecord_file_list = glob(os.path.join(\n",
        "        data_dir, '*_FFN_{0}.tfrecord'.format((mode))))\n",
        "    if not tfrecord_file_list:\n",
        "        print('TF Record not found')\n",
        "        make_tfrecord(\n",
        "            data_dir, create_generator_for_ffn,\n",
        "            ffn_serialize_fn, 'FFN')\n",
        "\n",
        "    dataset = tf.data.TFRecordDataset(tfrecord_file_list)\n",
        "\n",
        "    def _parse_ffn_example(example_proto):\n",
        "        feature_description = {\n",
        "            'features': tf.io.FixedLenFeature([2*768], tf.float32),\n",
        "            'labels': tf.io.FixedLenFeature([], tf.int64, default_value=0),\n",
        "        }\n",
        "        feature_dict = tf.io.parse_single_example(\n",
        "            example_proto, feature_description)\n",
        "        return tf.reshape(feature_dict['features'], (2, 768)), feature_dict['labels']\n",
        "    dataset = dataset.map(_parse_ffn_example)\n",
        "\n",
        "    dataset = dataset.shuffle(shuffle_buffer)\n",
        "\n",
        "    dataset = dataset.prefetch(prefetch)\n",
        "\n",
        "    dataset = dataset.batch(batch_size)\n",
        "    return dataset"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "PRwbUQeXzfKu",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "from __future__ import absolute_import, division, print_function, unicode_literals\n",
        "\n",
        "import os\n",
        "import pandas as pd\n",
        "from sklearn.model_selection import train_test_split\n",
        "import numpy as np\n",
        "\n",
        "import tensorflow as tf\n",
        "import tensorflow.keras.backend as K\n",
        "\n",
        "\n",
        "class FFN(tf.keras.layers.Layer):\n",
        "    def __init__(\n",
        "            self,\n",
        "            hidden_size=768,                                                                #SG edit from 768 4-24-19\n",
        "            dropout=0.2,\n",
        "            residual=True,\n",
        "            name='FFN',\n",
        "            **kwargs):\n",
        "        \"\"\"Simple Dense wrapped with various layers\n",
        "        \"\"\"\n",
        "\n",
        "        super(FFN, self).__init__(name=name, **kwargs)\n",
        "        self.hidden_size = hidden_size\n",
        "        self.dropout = dropout\n",
        "        self.residual = residual\n",
        "        self.ffn_layer = tf.keras.layers.Dense(\n",
        "            units=hidden_size,\n",
        "            use_bias=True\n",
        "        )\n",
        "\n",
        "    def call(self, inputs):\n",
        "        ffn_embedding = self.ffn_layer(inputs)\n",
        "        ffn_embedding = tf.keras.layers.ReLU()(ffn_embedding)\n",
        "        if self.dropout > 0:\n",
        "            ffn_embedding = tf.keras.layers.Dropout(\n",
        "                self.dropout)(ffn_embedding)\n",
        "#         ffn_embedding = self.ffn_layer(inputs)  #SG edit from 768 4-24-19\n",
        "#         ffn_embedding = tf.keras.layers.ReLU()(ffn_embedding)  #SG edit from 768 4-24-19\n",
        "#         if self.dropout > 0:  #SG edit from 768 4-24-19\n",
        "#             ffn_embedding = tf.keras.layers.Dropout(  #SG edit from 768 4-24-19\n",
        "#                 self.dropout)(ffn_embedding)  #SG edit from 768 4-24-19\n",
        "\n",
        "\n",
        "        if self.residual:\n",
        "            ffn_embedding += inputs\n",
        "        return ffn_embedding\n",
        "\n",
        "\n",
        "class MedicalQAModel(tf.keras.Model):\n",
        "    def __init__(self, name=''):\n",
        "        super(MedicalQAModel, self).__init__(name=name)\n",
        "        self.q_ffn = FFN(name='QFFN', input_shape=(768,))\n",
        "        self.a_ffn = FFN(name='AFFN', input_shape=(768,))\n",
        "\n",
        "    def call(self, inputs):\n",
        "#         print(inputs)\n",
        "#         tf.print(inputs)\n",
        "        q_bert_embedding, a_bert_embedding = tf.unstack(inputs, axis=1)\n",
        "#         print(q_bert_embedding)\n",
        "#         print(a_bert_embedding)\n",
        "#         tf.print(q_bert_embedding)\n",
        "#         tf.print(a_bert_embedding)\n",
        "        q_embedding, a_embedding = self.q_ffn(\n",
        "            q_bert_embedding), self.a_ffn(a_bert_embedding)\n",
        "        return tf.stack([q_embedding, a_embedding], axis=1)\n",
        "\n",
        "\n",
        "class BioBert(tf.keras.Model):\n",
        "    def __init__(self, name=''):\n",
        "        super(BioBert, self).__init__(name=name)\n",
        "\n",
        "    def call(self, inputs):\n",
        "\n",
        "        # inputs is dict with input features\n",
        "        input_ids, input_masks, segment_ids = inputs\n",
        "        # pass to bert\n",
        "        # with shape of (batch_size/2*batch_size, max_seq_len, hidden_size)\n",
        "        # TODO(Alex): Add true bert model\n",
        "        # Input: input_ids, input_masks, segment_ids all with shape (None, max_seq_len)\n",
        "        # Output: a tensor with shape (None, max_seq_len, hidden_size)\n",
        "        fake_bert_output = tf.expand_dims(tf.ones_like(\n",
        "            input_ids, dtype=tf.float32), axis=-1)*tf.ones([1, 1, 768], dtype=tf.float32)\n",
        "        max_seq_length = tf.shape(fake_bert_output)[-2]\n",
        "        hidden_size = tf.shape(fake_bert_output)[-1]\n",
        "\n",
        "        bert_output = tf.reshape(\n",
        "            fake_bert_output, (-1, 2, max_seq_length, hidden_size))\n",
        "        return bert_output\n",
        "\n",
        "\n",
        "class MedicalQAModelwithBert(tf.keras.Model):\n",
        "    def __init__(\n",
        "            self,\n",
        "            hidden_size=768,\n",
        "            dropout=0.2,\n",
        "            residual=True,\n",
        "            activation=tf.keras.layers.ReLU(),\n",
        "            name=''):\n",
        "        super(MedicalQAModelwithBert, self).__init__(name=name)\n",
        "        self.biobert = BioBert()\n",
        "        self.q_ffn_layer = FFN(\n",
        "            hidden_size=hidden_size,\n",
        "            dropout=dropout,\n",
        "            residual=residual,\n",
        "            activation=activation)\n",
        "        self.a_ffn_layer = FFN(\n",
        "            hidden_size=hidden_size,\n",
        "            dropout=dropout,\n",
        "            residual=residual,\n",
        "            activation=activation)\n",
        "\n",
        "    def _avg_across_token(self, tensor):\n",
        "        if tensor is not None:\n",
        "            tensor = tf.reduce_mean(tensor, axis=1)\n",
        "        return tensor\n",
        "\n",
        "    def call(self, inputs):\n",
        "\n",
        "        q_bert_embedding, a_bert_embedding = self.biobert(inputs)\n",
        "\n",
        "        # according to USE, the DAN network average embedding across tokens\n",
        "        q_bert_embedding = self._avg_across_token(q_bert_embedding)\n",
        "        a_bert_embedding = self._avg_across_token(a_bert_embedding)\n",
        "\n",
        "        q_embedding = self.q_ffn_layer(q_bert_embedding)\n",
        "        a_embedding = self.a_ffn_layer(a_bert_embedding)\n",
        "\n",
        "        return tf.stack([q_embedding, a_embedding], axis=1)\n",
        "\n",
        "      \n",
        "      \n",
        "# def qa_pair_cross_entropy_loss(y_true, y_pred):\n",
        "#     y_true = tf.eye(tf.shape(y_pred)[0])\n",
        "#     q_embedding, a_embedding = tf.unstack(y_pred, axis=1)\n",
        "#     similarity_matrix = tf.matmul(\n",
        "#         q_embedding, a_embedding, transpose_b=True)\n",
        "#     similarity_matrix_logits = tf.math.sigmoid(similarity_matrix)\n",
        "#     return tf.keras.losses.categorical_crossentropy(y_true, similarity_matrix_logits, from_logits=True)\n",
        "\n",
        "def qa_pair_cross_entropy_loss(y_true, y_pred):\n",
        "    y_true = tf.eye(tf.shape(y_pred)[0])\n",
        "    q_embedding, a_embedding = tf.unstack(y_pred, axis=1)\n",
        "    similarity_matrix = tf.matmul(\n",
        "        a = q_embedding, b = a_embedding, transpose_b=True)\n",
        "    similarity_matrix_softmaxed = tf.nn.softmax(similarity_matrix)\n",
        "    K.print_tensor(similarity_matrix_softmaxed, message=\"similarity_matrix_softmaxed is: \")\n",
        "    return tf.keras.losses.categorical_crossentropy(y_true, similarity_matrix_softmaxed, from_logits=False)\n",
        "\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "_LVFHJL5znpm",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# training config\n",
        "batch_size = 64\n",
        "num_epochs=35\n",
        "learning_rate=0.0001\n",
        "validation_split=0.2\n",
        "shuffle_buffer=50000\n",
        "prefetch=50000\n",
        "\n",
        "#Gdrive link to tfrecords\n",
        "#https://drive.google.com/open?id=1wRc1jtl5Q0objpfualNFwpg4H575tmks\n",
        "#Place the files in your own Google drive in a folder called 'mqa_tf_record'\n",
        "data_path='/content/gdrive/My Drive/mqa_tf_record'\n",
        "\n",
        "#Gdrive link to model\n",
        "#https://drive.google.com/open?id=1TxR8UBzoBtlewO3lt5efLQgGP_3BRPZm\n",
        "#Place the files in your own Google drive in a folder called 'mqa_models'\n",
        "model_path = '/content/gdrive/My Drive/mqa_models/ffn_model_cross_entropy'"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "GySah9vmzvVn",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "  d = create_dataset_for_ffn(\n",
        "      data_path, batch_size=batch_size, shuffle_buffer=shuffle_buffer, prefetch=prefetch)\n",
        "  eval_d = create_dataset_for_ffn(\n",
        "      data_path, batch_size=batch_size, mode='eval')\n",
        "  medical_qa_model = MedicalQAModel()\n",
        "  optimizer = tf.keras.optimizers.Adam(lr=learning_rate)\n",
        "  medical_qa_model.compile(\n",
        "      optimizer=optimizer, loss=qa_pair_cross_entropy_loss)\n",
        "\n",
        "  epochs = num_epochs\n",
        "  loss_metric = tf.keras.metrics.Mean()\n",
        "\n",
        "#   history = medical_qa_model.fit(d, epochs=epochs, validation_data=eval_d)\n",
        "#   history = medical_qa_model.train_on_batch(d)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "h1zDUw10zxwI",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "model_path2 = '/content/gdrive/My Drive/mqa_models/ffn_model_cross_entropy.ckpt'\n",
        "\n",
        "checkpoint = tf.keras.callbacks.ModelCheckpoint(model_path2, monitor='loss', verbose=1, save_best_only=True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "1hkG6NfFzylm",
        "colab_type": "code",
        "outputId": "d2d2230c-bbb3-4791-a011-5f1f76aacfb8",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "cell_type": "code",
      "source": [
        "checkpoint_dir = os.path.dirname(model_path2)\n",
        "print(checkpoint_dir)"
      ],
      "execution_count": 10,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/content/gdrive/My Drive/mqa_models\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "vIvJIp7Pz0kN",
        "colab_type": "code",
        "outputId": "f5c09a7c-3ce0-4de8-a8e4-3fd30ee514eb",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "cell_type": "code",
      "source": [
        "medical_qa_model.load_weights(model_path2)"
      ],
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "<tensorflow.python.training.tracking.util.CheckpointLoadStatus at 0x7f9a68527ac8>"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 11
        }
      ]
    },
    {
      "metadata": {
        "id": "q6OFGaC4z2cp",
        "colab_type": "code",
        "outputId": "fd69d371-ec2e-4833-b865-a057d6d2599b",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 406
        }
      },
      "cell_type": "code",
      "source": [
        "#This is optional, it's just to make sure weights are loaded correctly. Loss should start below 2 for batch size 64\n",
        "\n",
        "history = medical_qa_model.fit(d, epochs=2, validation_data=eval_d )"
      ],
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Epoch 1/2\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "WARNING: Logging before flag parsing goes to stderr.\n",
            "W0427 16:37:29.969590 140303120930688 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py:2924: Print (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2018-08-20.\n",
            "Instructions for updating:\n",
            "Use tf.print instead of tf.Print. Note that tf.print returns a no-output operator that directly prints the output. Outside of defuns or eager mode, this operator will not be executed unless it is directly specified in session.run or used as a control dependency for other operators. This is only a concern in graph mode. Below is an example of how to ensure tf.print executes in graph mode:\n",
            "```python\n",
            "    sess = tf.Session()\n",
            "    with sess.as_default():\n",
            "        tensor = tf.range(10)\n",
            "        print_op = tf.print(tensor)\n",
            "        with tf.control_dependencies([print_op]):\n",
            "          out = tf.add(tensor, tensor)\n",
            "        sess.run(out)\n",
            "    ```\n",
            "Additionally, to use tf.print in python 2.7, users must make sure to import\n",
            "the following:\n",
            "\n",
            "  `from __future__ import print_function`\n",
            "\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "3349/3349 [==============================] - 52s 16ms/step - loss: 1.3162 - val_loss: 0.0000e+00\n",
            "Epoch 2/2\n",
            "3349/3349 [==============================] - 27s 8ms/step - loss: 1.2721 - val_loss: 1.9777\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "16QNu5vRd7lT",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "from bert_serving.client import BertClient"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "u7j43y1id-Zw",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "bc = BertClient()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "xXegOcfTo1QW",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def QgetEmbeds(x):\n",
        "    bertEmbed= bc.encode([x])\n",
        "    textinput = tf.constant([bertEmbed[0], bertEmbed[0]])\n",
        "    textinputF = tf.dtypes.cast(textinput , dtype=tf.float32) \n",
        "    textinputE = tf.expand_dims(textinputF, 0)\n",
        "    q_embedding, a_embedding = tf.unstack(medical_qa_model(textinputE), axis=1)\n",
        "    return q_embedding.numpy()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "9jZTvSEVcxWw",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "inputtext = \"Is this schizophrenia? How can I get appropriate help for it in the middle of nowhere? Age: 18 Sex: Male Height: 5 10 Weight: 158 lb Race: Black Duration of complaint: 3 years Location (Geographic and on body): Psyche Any existing relevant medical issues (if any): Trauma from child abuse and molestation Current medications (if any): None Include a photo if relevant (skin condition for example)I think I might have schizophrenia. I've felt suicidal for a long time, and I started hearing voices about three years ago that come and go. Sometimes they say things that make no sense but have a sort of poetic rhythm. Example: The crime of the nine crushes the dime of the lime. I've been reading Freud and other investigators of the subconscious trying to find the meaning of the things I hear, but I haven't come across anything conclusive. The voices can also be nasty or say things that are untrue or paranoid. Sometimes they straight up call me ugly and stupid and say I should kill myself. Sometimes when I see a white person, a voice says they hate black people and want to kill me. I know it's just my imagination, but I get suspicious and keep my guard up around them. I really don't know if I have delusions. From what I've read, they're hard to recognize for the individual. I don't trust anything the voices say, but I still feel depressed and suicidal due to life circumstances. I live with my mom and her boyfriend, and they constantly fight and hit each other. My mom is 90% of the problem because she has hair trigger emotions and gets very mad over nothing. My whole life feels extremely hopeless, and I sleep excessively and sob all the time. I had two friends who both stopped talking to me and moved away. We live in the middle of nowhere and the nearest mental hospital is an hour and a half away. \\nI'd have to take her car, and if it's bad enough that they want to strap me to a bed as they often do with suicidal people, then I wouldn't be able to bring her car back for a while. There is a clinic in town that my mom won't let me go to because the doctors all hate black people. She said they misdiagnosed her breast cancer and tried to implant a chip inside her because they want to keep track of all the black people. She said they were taking a blood sample and injected a neurotoxin. This sounds very paranoid to me, but regardless she's the one with the health insurance who gets to decide where I go and what I do. She had to fly to Rochester, MN for cancer treatment because nobody in our area would help her when she had a breast lump that required a mastectomy.\""
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "xfUWacuhlYPj",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "testtt = QgetEmbeds(inputtext)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "Ra8HCll2l0xO",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 3242
        },
        "outputId": "e2dc9b4e-e95b-48e2-be77-cf536f29c5bd"
      },
      "cell_type": "code",
      "source": [
        "print(testtt)"
      ],
      "execution_count": 29,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "[[-7.41489232e-02 -2.93513060e-01  7.94782341e-01  3.73879313e-01\n",
            "  -1.83415543e-02 -1.18541509e-01  1.46455944e-01 -1.50270730e-01\n",
            "   8.95201683e-01  4.57437724e-01  4.25101250e-01  6.55673265e-01\n",
            "   9.01300162e-02  5.38662851e-01  2.21652165e-02 -1.02907240e-01\n",
            "   1.15937382e-01  3.38005483e-01 -3.31434935e-01  8.24319571e-02\n",
            "   1.27929258e+00  2.15683386e-01  5.31941831e-01 -1.56582832e-01\n",
            "  -1.59840472e-02  2.44420439e-01  3.93882424e-01  9.94355857e-01\n",
            "   1.18196058e+00  2.55656362e-01 -1.95982307e-02  5.19520521e-01\n",
            "  -2.01406032e-01 -2.40532070e-01  2.62885481e-01  1.97201937e-01\n",
            "  -1.45242333e-01  2.97877640e-01 -2.97187492e-02  6.45119548e-01\n",
            "   1.00307986e-01  2.55139470e-01  1.61336005e-01  2.63374090e-01\n",
            "   3.20117831e-01  7.69968152e-01  6.99028373e-02 -1.43000603e-01\n",
            "  -5.75600043e-02 -1.89701223e-03  3.89344215e-01  7.32022464e-01\n",
            "   9.77720976e-01 -5.88473082e-02  1.02633631e+00 -1.07916273e-01\n",
            "  -8.85868371e-02  4.19590503e-01 -3.18220615e-01  7.32022703e-01\n",
            "   1.23360634e-01  1.61591172e-01  3.56453836e-01  4.95910227e-01\n",
            "   4.96974349e-01  8.16630423e-02  1.02924705e+00  4.85617846e-01\n",
            "   2.91709274e-01 -1.66024074e-01  1.96305513e-02 -1.77377105e-01\n",
            "   4.37245637e-01 -2.61822250e-02 -4.61974256e-02  6.22735143e-01\n",
            "   1.87106669e-01 -1.42409116e-01  5.34897745e-01  3.86150211e-01\n",
            "   1.02268064e+00  2.16812804e-01 -2.84057617e-01  6.56735241e-01\n",
            "   2.40948901e-01  7.60313496e-02 -5.41233793e-02  1.30884677e-01\n",
            "  -1.41254425e-01  4.76742208e-01  3.60900201e-02 -2.84765899e-01\n",
            "   1.24902548e-02  8.71413797e-02 -8.77401009e-02  1.09080866e-01\n",
            "   1.56746060e-01  4.88514066e-01 -1.56817883e-01  3.34875941e-01\n",
            "   9.48958397e-01 -1.93889901e-01  1.36602104e-01  7.59115040e-01\n",
            "   7.97980428e-01  1.45484209e-01  1.23608671e-01 -2.58319438e-01\n",
            "   1.18624838e-03  4.71438766e-02  2.57149637e-01 -4.51946482e-02\n",
            "   7.26759970e-01  2.68867195e-01 -3.91712934e-02  1.20227247e-01\n",
            "   9.22199413e-02  4.82074410e-01  2.93111324e-01 -1.32785887e-01\n",
            "  -1.23086706e-01 -2.85642624e-01  3.44588518e-01  5.18251181e-01\n",
            "   2.69254535e-01  2.36950815e-03  8.36153567e-01  6.13622785e-01\n",
            "   4.87049401e-01  7.58216828e-02 -3.02744329e-01  4.76951212e-01\n",
            "   4.74723399e-01  1.57413036e-01  2.65927106e-01  2.25315597e-02\n",
            "   8.51390779e-01  1.65096092e+00  2.75377810e-01  2.91307569e-01\n",
            "  -2.32223570e-02 -1.21265024e-01  3.84582967e-01  5.83103418e-01\n",
            "  -2.19896510e-01  2.88005471e-02 -2.55411178e-01  1.48051977e-01\n",
            "  -8.35044682e-02  2.12414354e-01  3.47225189e-01  1.02310467e+00\n",
            "   1.04903817e-01  4.72418487e-01  1.14305899e-01 -3.63195032e-01\n",
            "   4.39490259e-01  1.74652234e-01  3.72671366e-01 -1.02261445e-02\n",
            "  -3.55212301e-01  8.66043687e-01  3.02666500e-02  1.20133817e+00\n",
            "   4.51328933e-01 -2.06198230e-01  6.39776051e-01  2.71115363e-01\n",
            "   5.05507350e-01  2.99591184e-01  4.20816541e-02  2.96086848e-01\n",
            "  -2.03330696e-01  4.05751765e-01  2.02102259e-01  5.74395061e-02\n",
            "  -9.37995762e-02 -1.06999204e-01  6.26835465e-01 -1.05715424e-01\n",
            "  -1.05567910e-01  1.26252100e-01 -1.74246579e-01  2.20699295e-01\n",
            "  -5.69566824e-02  7.54051685e-01  4.20613319e-01  2.49280810e-01\n",
            "   5.64832509e-01  4.95270520e-01  1.50842234e-01  2.39060476e-01\n",
            "   3.62844467e-02  3.92455608e-01 -9.96187627e-02  5.03294095e-02\n",
            "   1.98083490e-01  2.40338724e-02  6.56812638e-02 -2.91174889e-01\n",
            "  -1.27878457e-01  5.52352890e-02 -1.86320573e-01 -1.24469712e-01\n",
            "   3.57629396e-02  2.31015325e-01  9.93086815e-01 -3.54536623e-03\n",
            "   2.61142999e-02  1.08199978e+00  5.60185909e-01  3.20944190e-02\n",
            "   4.62994725e-01  3.81940424e-01  4.00994897e-01  4.70407248e-01\n",
            "   3.36418122e-01 -2.07853884e-01  1.24652013e-01  2.48011678e-01\n",
            "   6.88594103e-01 -2.71484435e-01  2.91991055e-01  9.92854834e-02\n",
            "  -1.97038352e-01  3.07085276e-01  8.31006825e-01  7.15158433e-02\n",
            "  -9.42814536e-03 -4.20478582e-02 -1.08955786e-01  4.15213138e-01\n",
            "   3.88351768e-01 -3.00294962e-02 -4.65912759e-01  3.01614821e-01\n",
            "  -1.57572627e-02 -2.97352672e-04  4.34880137e-01  2.77245846e-02\n",
            "   8.59782517e-01 -1.51982218e-01  5.30192614e-01  7.13835835e-01\n",
            "  -2.69314408e-01 -1.81100234e-01  6.08739182e-02  6.84422910e-01\n",
            "   8.38957429e-02 -1.29824042e-01  2.90843099e-01 -7.98895806e-02\n",
            "  -4.18573916e-01  7.32528865e-02  6.31971121e-01  7.21075773e-01\n",
            "   5.81046939e-01  6.28729999e-01  8.37211963e-03 -1.73782557e-01\n",
            "   1.13222443e-01  3.83066416e-01  4.45631891e-02  6.64960146e-02\n",
            "  -5.62367857e-01  2.26522252e-01  2.82426089e-01  7.57081658e-02\n",
            "  -2.68720686e-01 -1.96223006e-01  7.46848285e-02  1.59602225e-01\n",
            "  -2.02384889e-02  2.47760832e-01 -1.34345472e-01 -2.74353065e-02\n",
            "  -3.69789600e-02  8.33010077e-02 -1.10246383e-01 -9.36259508e-01\n",
            "   7.71556273e-02  1.10023928e+00 -3.99282008e-01 -1.99886970e-02\n",
            "   9.77565125e-02 -1.00845225e-01  3.35204959e-01  4.36077118e-01\n",
            "   3.24296951e-02  5.27877808e-01  2.31060058e-01  1.46396756e-02\n",
            "  -2.78334290e-01 -5.05462348e-01  3.53344470e-01 -3.47200453e-01\n",
            "   6.92625046e-02  5.80172300e-01  6.35009766e-01  1.49923682e-01\n",
            "   5.13323665e-01  2.09902957e-01  9.10706341e-01  5.84331155e-02\n",
            "   3.98809582e-01  2.46948451e-01  2.51224071e-01 -6.61336780e-02\n",
            "   4.71623719e-01  6.18649900e-01  4.30142283e-01  8.82136345e-01\n",
            "   3.49702179e-01  4.60848957e-01  1.01112090e-01  1.49585456e-02\n",
            "   3.66476566e-01  9.41427708e-01 -7.23750770e-01  2.06986219e-01\n",
            "   6.47197366e-01 -3.75789732e-01  6.40457988e-01 -1.68149799e-01\n",
            "  -3.68193388e-01  1.13984370e+00  1.14468068e-01  1.74757212e-01\n",
            "  -4.68529761e-02  7.61152506e-02 -5.07717669e-01  9.50015336e-03\n",
            "   8.79521132e-01  2.32277960e-01 -1.69301823e-01  8.02093267e-01\n",
            "  -1.04428500e-01  1.08449841e+00 -5.87755442e-01  1.98773339e-01\n",
            "   3.06442857e-01 -3.54518890e-01  4.57875788e-01  2.46122301e-01\n",
            "  -2.78945137e-02 -2.17843994e-01  4.20660935e-02  1.68208688e-01\n",
            "   3.39139521e-01 -2.35234633e-01 -3.31964284e-01  7.86340356e-01\n",
            "   4.88489747e-01 -4.70240623e-01  1.11059058e+00  2.68118590e-01\n",
            "   1.09416574e-01  5.62125087e-01 -2.45525137e-01 -2.90580213e-01\n",
            "   5.40646650e-02 -1.16945297e-01  2.15076685e-01 -4.21976626e-01\n",
            "   7.05453694e-01  8.34342599e-01 -2.75397211e-01  2.72990108e-01\n",
            "  -2.16806501e-01  2.01767027e-01 -6.73919171e-02 -2.70383120e-01\n",
            "   7.71808147e-01  1.95296735e-01  4.35743108e-02  2.40801886e-01\n",
            "  -7.46663138e-02 -3.21222395e-01  9.70330954e-01 -3.49527627e-01\n",
            "   6.61842406e-01  7.92733014e-01  3.04125756e-01 -1.45193741e-01\n",
            "  -2.61710346e-01  5.82072064e-02 -1.09098688e-01 -3.37937176e-01\n",
            "   4.53017712e-01 -1.80005446e-01 -1.91391349e-01  1.11552188e-04\n",
            "   3.20878476e-01 -2.51992106e-01  5.57620466e-01 -1.45006776e-01\n",
            "  -8.66115466e-02  6.77493095e-01  9.99955311e-02 -2.40599632e-01\n",
            "   1.09942043e+00 -4.10826594e-01  1.75121462e+00  1.43590108e-01\n",
            "   8.16890955e-01  1.56973243e-01 -4.24713969e-01  1.83388218e-01\n",
            "  -1.12806961e-01 -1.13971829e-02  1.37982473e-01  7.31209636e-01\n",
            "   7.67648876e-01  1.08734357e+00  2.95680046e-01  5.82818866e-01\n",
            "   8.30679536e-01  7.72817492e-01  8.25478733e-02  3.46971154e-02\n",
            "   3.80077869e-01  7.95408189e-01  7.34756514e-02  2.50236064e-01\n",
            "   2.03919277e-01 -1.73576087e-01 -5.97660020e-02 -2.62999862e-01\n",
            "   1.99215785e-01  3.80586646e-02  1.54418558e-01  2.93961614e-01\n",
            "   1.31807625e-01  3.89889479e-01  1.32716689e-02 -1.97366700e-01\n",
            "   2.45060340e-01  4.81129885e-02  4.37726974e-01  5.17238736e-01\n",
            "   4.42645073e-01  8.89985442e-01  2.32714936e-02 -3.28949988e-01\n",
            "   5.07856905e-03 -5.56125538e-04  3.92101020e-01 -4.41737056e-01\n",
            "   2.86476910e-01  6.59016848e-01  9.66102362e-01  6.17435753e-01\n",
            "   4.72760886e-01  3.05148102e-02  2.85812080e-01 -1.52346778e+00\n",
            "  -4.14901346e-01  5.01294062e-02 -1.44137383e-01 -8.94063264e-02\n",
            "  -1.67116895e-01 -4.19837385e-02  2.77000070e-01  3.38022858e-01\n",
            "   1.69425499e+00  1.21215427e+00  9.13978100e-01  1.15624404e+00\n",
            "   3.37469339e-01  2.77882069e-01  1.29628158e+00  1.80842027e-01\n",
            "   3.00866753e-01  4.02653873e-01  7.00353086e-02 -3.73183429e-01\n",
            "   7.60874152e-01 -7.92585611e-02  3.52066420e-02  1.95682079e-01\n",
            "  -3.25760126e-01 -5.31787537e-02 -1.27145350e-01  5.05829901e-02\n",
            "   3.11637223e-01 -2.88228184e-01  3.44046026e-01 -1.22807205e-01\n",
            "   1.04912743e-01  3.78658623e-01 -1.84254289e-01  8.38112012e-02\n",
            "   9.03021693e-01  7.90936172e-01  6.05603099e-01  3.15686285e-01\n",
            "  -1.24879546e-01 -7.11674154e-01 -6.03524186e-02  1.93507358e-01\n",
            "   9.74234045e-01  2.92754211e-02  4.29471672e-01 -4.56254482e-01\n",
            "  -3.40333819e-01  7.82569230e-01  1.72192186e-01  3.12328696e-01\n",
            "   9.49750423e-01 -5.97506240e-02  5.31446576e-01 -2.04739973e-01\n",
            "   9.23077345e-01 -1.90457655e-03 -1.69838220e-01  2.84590088e-02\n",
            "   5.56098938e-01  1.49959818e-01  1.04912542e-01  1.20721854e-01\n",
            "   3.56957465e-01 -4.01087135e-01  4.80584055e-01 -2.19040468e-01\n",
            "  -2.14056909e-01  2.95348585e-01  3.63041818e-01  1.44026667e-01\n",
            "   1.07342577e+00  3.46794613e-02  5.40625155e-01  6.42439783e-01\n",
            "  -3.17751080e-01  2.86250889e-01  2.73141384e-01 -1.67177618e-01\n",
            "  -2.22162232e-02 -5.24795912e-02 -6.70086443e-01  3.37153822e-01\n",
            "  -7.95228302e-01 -1.50876381e-02  8.96376312e-01  6.38959110e-02\n",
            "   2.18763091e-02  6.60660207e-01 -1.36669725e-02  9.59495187e-01\n",
            "   1.51440993e-01  5.49008965e-01  5.93826830e-01  3.25662017e-01\n",
            "   7.53734350e-01  3.03831160e-01  3.80181074e-01  8.06029677e-01\n",
            "  -1.90314531e-01 -1.80357494e+01  3.71025294e-01 -7.06319809e-02\n",
            "  -1.63311869e-01  3.92410368e-01  3.48729551e-01  2.83091366e-01\n",
            "   1.35614812e-01  1.53587669e-01  4.25470918e-01  1.52162313e-02\n",
            "   7.24445462e-01  2.97637701e-01 -2.05613617e-02 -2.02872649e-01\n",
            "   6.71168804e-01 -2.61912048e-01 -1.78771183e-01  5.89101672e-01\n",
            "   6.71836793e-01 -1.24632366e-01  6.98557317e-01  1.20640062e-02\n",
            "   1.30495310e-01  5.95894575e-01  9.89553407e-02 -1.83993936e-01\n",
            "   1.12554878e-01 -4.07692976e-02  4.85972799e-02 -1.50241375e-01\n",
            "   2.08670139e-01 -2.66663879e-01  7.02865243e-01  1.37591505e+00\n",
            "  -2.33838379e-01 -2.74493754e-01  4.93015684e-02  9.59025621e-01\n",
            "   2.93430686e-02  1.08340216e+00  3.32588911e-01  7.32505441e-01\n",
            "   1.03879559e+00  3.43832970e-01  5.84375143e-01 -4.89367068e-01\n",
            "  -1.11316465e-01  2.66928434e-01  7.50380039e-01 -3.97428811e-01\n",
            "   8.15074086e-01  1.12268794e+00  1.15030989e-01  2.84153014e-01\n",
            "  -2.35357672e-01  3.64633530e-01  2.69554555e-01  7.44871676e-01\n",
            "  -2.74101913e-01  4.75997537e-01 -7.64113292e-02  3.76833975e-01\n",
            "   1.74219966e-01 -5.68406135e-02  6.29522145e-01  1.33558273e-01\n",
            "   2.97633857e-01 -1.09924704e-01  6.76710188e-01 -7.06556737e-02\n",
            "   3.36358368e-01  3.32395256e-01  4.78912532e-01  8.60749424e-01\n",
            "  -2.21785769e-01 -1.85144246e-01  8.01738203e-02  1.58531219e-01\n",
            "   4.44875807e-01 -1.73984364e-01  3.88805270e-02  1.25128552e-01\n",
            "  -7.82438517e-02  3.39551605e-02 -8.39903802e-02  2.56056756e-01\n",
            "   1.08268261e-01  4.08596396e-01  1.09559333e+00 -2.95938849e-01\n",
            "  -2.97606081e-01 -2.43526161e-01  3.38001102e-01  3.09662282e-01\n",
            "   1.40957046e+00  7.06326783e-01  7.22522438e-02 -3.14686626e-01\n",
            "   5.29068470e-01  7.52720177e-01 -4.64337856e-01  2.05376565e-01\n",
            "   7.27643251e-01  6.34209812e-02  2.16244720e-03  4.17063758e-02\n",
            "   8.47461224e-02  1.25637397e-01  7.78454363e-01  1.38273776e+00\n",
            "  -1.19298220e-01  2.88211226e-01  5.51254988e-01  3.08340210e-02\n",
            "   2.36210287e-01  6.25075102e-02  3.33344072e-01  5.84622622e-01\n",
            "  -2.10026547e-01 -3.40233862e-01  1.06539154e+00  7.25051165e-02\n",
            "   2.33997062e-01  5.46100199e-01 -7.05915466e-02  1.10505235e+00\n",
            "   5.08709028e-02  6.66589260e-01 -1.63878202e-01 -4.26678568e-01\n",
            "   1.23989776e-01  3.36920500e-01  5.70628405e-01  1.60263821e-01\n",
            "  -3.08017135e-01 -1.84421614e-02  2.95811325e-01  4.43802744e-01\n",
            "   9.45196569e-01 -8.59561443e-01  2.42208809e-01  3.07089359e-01\n",
            "   2.03996867e-01  5.58870912e-01  5.17475843e-01  1.91279262e-01\n",
            "  -3.90091330e-01 -5.17746806e-02 -4.90786493e-01 -5.77847123e-01\n",
            "   4.30610389e-01 -6.25911653e-02  5.80095172e-01  3.26318622e-01\n",
            "   1.09612100e-01  5.03061414e-01  6.04357004e-01 -2.79187769e-01\n",
            "  -1.73709080e-01  1.47630632e-01  1.16554732e-02  3.50724459e-01\n",
            "   2.54240870e-01  4.27579463e-01 -5.68382069e-02 -4.05390300e-02\n",
            "   5.64760625e-01  2.51818657e-01  9.44096565e-01 -9.39207897e-03\n",
            "   1.15281463e+00 -6.30617663e-02  1.21090412e-01 -3.65292758e-01\n",
            "  -1.87999725e-01  8.68452668e-01  1.12336898e+00  8.08920860e-01\n",
            "   4.71550047e-01  2.74739325e-01  4.39154416e-01 -1.25689879e-01\n",
            "   3.33676040e-01  7.83305615e-02  4.41620201e-01  4.64656055e-01\n",
            "  -2.34658182e-01 -1.00542068e-01 -3.23154867e-01  4.07329828e-01\n",
            "   4.73718435e-01  6.83684945e-02 -1.91489584e-03  7.69390017e-02\n",
            "   6.66889310e-01 -2.80559003e-01  8.74196827e-01  4.39553112e-02\n",
            "   7.67665386e-01  1.75062880e-01 -1.46693125e-01 -1.01579145e-01\n",
            "  -1.66272476e-01 -1.56962231e-01  6.52658343e-01  4.70679820e-01\n",
            "  -1.37417600e-01  7.95452118e-01  1.77984759e-01 -1.13299526e-01]]\n"
          ],
          "name": "stdout"
        }
      ]
    }
  ]
}