{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Serving Function\n",
    "\n",
    "This notebook demonstrates the Serving Function design pattern using Keras\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Simple text classification model\n",
    "\n",
    "This model uses transfer learning with TensorFlow Hub and Keras. It is based on https://www.tensorflow.org/tutorials/keras/text_classification_with_hub\n",
    "\n",
    "It classifies movie reviews as positive or negative using the text of the review. The reviews come from an IMDB dataset that contains the text of 50,000 movie reviews from the Internet Movie Database. These are split into 25,000 reviews for training and 25,000 reviews for testing."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Already installed if you are using Cloud AI Platform Notebooks\n",
    "#!pip install -q tensorflow-hub\n",
    "#!pip install -q tfds-nightly"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "import tensorflow_hub as hub\n",
    "import tensorflow_datasets as tfds\n",
    "train_data, test_data = tfds.load(\n",
    "    name=\"imdb_reviews\", \n",
    "    split=('train', 'test'),\n",
    "    as_supervised=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "split = 3 # 1 of every (split + 1) = 4 records goes to validation\n",
    "dataset_train = train_data.window(split, split + 1).flat_map(lambda *ds: ds[0] if len(ds) == 1 else tf.data.Dataset.zip(ds))\n",
    "dataset_validation = train_data.skip(split).window(1, split + 1).flat_map(lambda *ds: ds[0] if len(ds) == 1 else tf.data.Dataset.zip(ds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "embedding = \"https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1\"\n",
    "hub_layer = hub.KerasLayer(embedding, input_shape=[], \n",
    "                           dtype=tf.string, trainable=True, name='full_text')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "full_text (KerasLayer)       (None, 20)                389380    \n",
      "_________________________________________________________________\n",
      "h1_dense (Dense)             (None, 16)                336       \n",
      "_________________________________________________________________\n",
      "positive_review_logits (Dens (None, 1)                 17        \n",
      "=================================================================\n",
      "Total params: 389,733\n",
      "Trainable params: 389,733\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model = tf.keras.Sequential()\n",
    "model.add(hub_layer)\n",
    "model.add(tf.keras.layers.Dense(16, activation='relu', name='h1_dense'))\n",
    "model.add(tf.keras.layers.Dense(1, name='positive_review_logits'))\n",
    "\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "37/37 [==============================] - 12s 331ms/step - loss: 0.7690 - accuracy: 0.5320 - val_loss: 0.6935 - val_accuracy: 0.5750\n",
      "Epoch 2/10\n",
      "37/37 [==============================] - 11s 294ms/step - loss: 0.6586 - accuracy: 0.6024 - val_loss: 0.6317 - val_accuracy: 0.6261\n",
      "Epoch 3/10\n",
      "37/37 [==============================] - 11s 300ms/step - loss: 0.6114 - accuracy: 0.6423 - val_loss: 0.5916 - val_accuracy: 0.6547\n",
      "Epoch 4/10\n",
      "37/37 [==============================] - 11s 295ms/step - loss: 0.5732 - accuracy: 0.6747 - val_loss: 0.5569 - val_accuracy: 0.6843\n",
      "Epoch 5/10\n",
      "37/37 [==============================] - 11s 303ms/step - loss: 0.5367 - accuracy: 0.7116 - val_loss: 0.5227 - val_accuracy: 0.7294\n",
      "Epoch 6/10\n",
      "37/37 [==============================] - 11s 299ms/step - loss: 0.4965 - accuracy: 0.7439 - val_loss: 0.4854 - val_accuracy: 0.7560\n",
      "Epoch 7/10\n",
      "37/37 [==============================] - 11s 303ms/step - loss: 0.4550 - accuracy: 0.7765 - val_loss: 0.4493 - val_accuracy: 0.7870\n",
      "Epoch 8/10\n",
      "37/37 [==============================] - 11s 297ms/step - loss: 0.4134 - accuracy: 0.8061 - val_loss: 0.4161 - val_accuracy: 0.8061\n",
      "Epoch 9/10\n",
      "37/37 [==============================] - 11s 303ms/step - loss: 0.3761 - accuracy: 0.8296 - val_loss: 0.3886 - val_accuracy: 0.8267\n",
      "Epoch 10/10\n",
      "37/37 [==============================] - 11s 300ms/step - loss: 0.3419 - accuracy: 0.8490 - val_loss: 0.3658 - val_accuracy: 0.8392\n"
     ]
    }
   ],
   "source": [
    "model.compile(optimizer='adam',\n",
    "              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "\n",
    "\n",
    "history = model.fit(dataset_train.shuffle(10000).batch(512),\n",
    "                    epochs=10,\n",
    "                    validation_data=dataset_validation.batch(512),\n",
    "                    verbose=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.381\n",
      "accuracy: 0.827\n"
     ]
    }
   ],
   "source": [
    "results = model.evaluate(test_data.batch(512), verbose=2)\n",
    "\n",
    "for name, value in zip(model.metrics_names, results):\n",
    "  print(\"%s: %.3f\" % (name, value))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Predict using the trained model in-memory\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[ 0.6965847]\n",
      " [ 1.61773  ]\n",
      " [-0.7543597]]\n"
     ]
    }
   ],
   "source": [
    "review1 = 'The film is based on a prize-winning novel.' # neutral\n",
    "review2 = 'The film is fast moving and has several great action scenes.' # positive\n",
    "review3 = 'The film was very boring. I walked out half-way.' # negative\n",
    "\n",
    "logits = model.predict(x=tf.constant([review1, review2, review3]))\n",
    "print(logits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "8.203439 MB\n"
     ]
    }
   ],
   "source": [
    "## how big is the model in memory?\n",
    "import sys\n",
    "\n",
    "# From https://goshippo.com/blog/measure-real-size-any-python-object/\n",
    "def get_size(obj, seen=None):\n",
    "    \"\"\"Recursively finds size of objects\"\"\"\n",
    "    size = sys.getsizeof(obj)\n",
    "    if seen is None:\n",
    "        seen = set()\n",
    "    obj_id = id(obj)\n",
    "    if obj_id in seen:\n",
    "        return 0\n",
    "    # Important mark as seen *before* entering recursion to gracefully handle\n",
    "    # self-referential objects\n",
    "    seen.add(obj_id)\n",
    "    try:\n",
    "        if isinstance(obj, dict):\n",
    "            size += sum([get_size(v, seen) for v in obj.values()])\n",
    "            size += sum([get_size(k, seen) for k in obj.keys()])\n",
    "        elif hasattr(obj, '__dict__'):\n",
    "            size += get_size(obj.__dict__, seen)\n",
    "        elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n",
    "            size += sum([get_size(i, seen) for i in obj])\n",
     "    except Exception:\n",
    "        pass\n",
    "    return size\n",
    "print('{} MB'.format(get_size(model)/(1000*1000)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Export the model for serving\n",
    "\n",
    "model.save() writes out a \"serve\" tag_set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1786: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "If using Keras pass *_constraint arguments to layers.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1786: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "If using Keras pass *_constraint arguments to layers.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: export/default/sentiment_20200505_232612/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: export/default/sentiment_20200505_232612/assets\n"
     ]
    }
   ],
   "source": [
    "import os, datetime, shutil\n",
    "shutil.rmtree('export/default', ignore_errors=True)\n",
    "export_path = os.path.join('export', 'default', 'sentiment_{}'.format(datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")))\n",
    "model.save(export_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "export/default\n",
      "export/default/sentiment_20200505_232612\n",
      "export/default/sentiment_20200505_232612/variables\n",
      "export/default/sentiment_20200505_232612/variables/variables.data-00000-of-00001\n",
      "export/default/sentiment_20200505_232612/variables/variables.index\n",
      "export/default/sentiment_20200505_232612/assets\n",
      "export/default/sentiment_20200505_232612/assets/tokens.txt\n",
      "export/default/sentiment_20200505_232612/saved_model.pb\n"
     ]
    }
   ],
   "source": [
    "!find export/default"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Note how much smaller the model itself is ... the assets and variables are constants and can be shared in a thread-safe way"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-rw-r--r-- 1 jupyter jupyter 112K May  5 23:26 export/default/sentiment_20200505_232612/saved_model.pb\n",
      "-rw-r--r-- 1 jupyter jupyter 148K May  5 23:26 export/default/sentiment_20200505_232612/assets/tokens.txt\n",
      "-rw-r--r-- 1 jupyter jupyter 4.5M May  5 23:26 export/default/sentiment_20200505_232612/variables/variables.data-00000-of-00001\n",
      "-rw-r--r-- 1 jupyter jupyter 1.6K May  5 23:26 export/default/sentiment_20200505_232612/variables/variables.index\n"
     ]
    }
   ],
   "source": [
    "!ls -lh {export_path}/saved_model.pb\n",
    "!ls -lh {export_path}/assets/tokens.txt\n",
    "!ls -lh {export_path}/variables/variables.*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The given SavedModel SignatureDef contains the following input(s):\n",
      "  inputs['full_text_input'] tensor_info:\n",
      "      dtype: DT_STRING\n",
      "      shape: (-1)\n",
      "      name: serving_default_full_text_input:0\n",
      "The given SavedModel SignatureDef contains the following output(s):\n",
      "  outputs['positive_review_logits'] tensor_info:\n",
      "      dtype: DT_FLOAT\n",
      "      shape: (-1, 1)\n",
      "      name: StatefulPartitionedCall_2:0\n",
      "Method name is: tensorflow/serving/predict\n"
     ]
    }
   ],
   "source": [
    "!saved_model_cli show --dir {export_path} --tag_set serve --signature_def serving_default"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(\n",
      "[[ 0.6965847]\n",
      " [ 1.61773  ]\n",
      " [-0.7543597]], shape=(3, 1), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "## illustrates how we can load this model and do inference based on the signature above\n",
    "restored = tf.keras.models.load_model(export_path)\n",
    "review1 = 'The film is based on a prize-winning novel.' # neutral\n",
    "review2 = 'The film is fast moving and has several great action scenes.' # positive\n",
    "review3 = 'The film was very boring. I walked out half-way.' # negative\n",
    "\n",
    "infer = restored.signatures['serving_default']\n",
    "outputs = infer(full_text_input=tf.constant([review1, review2, review3])) # note input name\n",
    "logit = outputs['positive_review_logits']  # note output name\n",
    "print(logit)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0.6674301 ]\n",
      " [0.83448184]\n",
      " [0.31987208]]\n"
     ]
    }
   ],
   "source": [
    "print(1 / (1 + np.exp(-logit))) # probability"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Custom serving function\n",
    "\n",
    "Let's write out a new signature. But this time, let's carry out the sigmoid operation, so that the model outputs a probability."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: export/probs/sentiment_20200505_232617/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: export/probs/sentiment_20200505_232617/assets\n"
     ]
    }
   ],
   "source": [
    "@tf.function(input_signature=[tf.TensorSpec([None], dtype=tf.string)])\n",
    "def add_prob(reviews):\n",
    "    logits = model(reviews, training=False) # the model is captured via closure\n",
    "    probs = tf.sigmoid(logits)\n",
    "    return {\n",
    "        'positive_review_logits' : logits,\n",
    "        'positive_review_probability' : probs\n",
    "    }\n",
    "shutil.rmtree('export/probs', ignore_errors=True)\n",
    "probs_export_path = os.path.join('export', 'probs', 'sentiment_{}'.format(datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")))\n",
    "model.save(probs_export_path, signatures={'serving_default': add_prob})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The given SavedModel SignatureDef contains the following input(s):\n",
      "  inputs['reviews'] tensor_info:\n",
      "      dtype: DT_STRING\n",
      "      shape: (-1)\n",
      "      name: serving_default_reviews:0\n",
      "The given SavedModel SignatureDef contains the following output(s):\n",
      "  outputs['positive_review_logits'] tensor_info:\n",
      "      dtype: DT_FLOAT\n",
      "      shape: (-1, 1)\n",
      "      name: StatefulPartitionedCall_2:0\n",
      "  outputs['positive_review_probability'] tensor_info:\n",
      "      dtype: DT_FLOAT\n",
      "      shape: (-1, 1)\n",
      "      name: StatefulPartitionedCall_2:1\n",
      "Method name is: tensorflow/serving/predict\n"
     ]
    }
   ],
   "source": [
    "!saved_model_cli show --dir {probs_export_path} --tag_set serve --signature_def serving_default"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(\n",
      "[[0.6674301 ]\n",
      " [0.83448184]\n",
      " [0.31987208]], shape=(3, 1), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "restored = tf.keras.models.load_model(probs_export_path)\n",
    "infer = restored.signatures['serving_default']\n",
    "outputs = infer(reviews=tf.constant([review1, review2, review3])) # note input name\n",
    "probs = outputs['positive_review_probability']  # note output name\n",
    "print(probs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Deploy to Cloud AI Platform Predictions\n",
    "\n",
    "We can deploy the model to AI Platform Predictions which will take care of scaling"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "export/probs/sentiment_20200505_232617\n"
     ]
    }
   ],
   "source": [
    "!find export/probs | head -2 | tail -1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "imdb v1\n",
      "Model imdb already exists\n",
      "v1 gs://ai-analytics-solutions-kfpdemo/26de3a7744a524256244830bbeb35625e78ad0bd0f7ecd5bda9c8c04d787320b/ READY\n",
      "Deleting version v1\n",
      "Creating version v1 from export/probs/sentiment_20200505_232617\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "This will delete version [v1]...\n",
      "\n",
      "Do you want to continue (Y/n)?  \n",
      "Deleting version [v1]......\n",
      "..........................................................................................................done.\n",
      "Creating version (this might take a few minutes)......\n",
      "..................................................................................................................................................................................................................................................................................................done.\n"
     ]
    }
   ],
   "source": [
    "%%bash\n",
    "\n",
    "MODEL_LOCATION=$(find export/probs | head -2 | tail -1)\n",
    "MODEL_NAME=imdb\n",
    "MODEL_VERSION=v1\n",
    "\n",
    "TFVERSION=2.1\n",
    "REGION=us-central1\n",
    "BUCKET=ai-analytics-solutions-kfpdemo\n",
    "\n",
    "# create the model if it doesn't already exist\n",
    "modelname=$(gcloud ai-platform models list | grep -w \"$MODEL_NAME\")\n",
    "echo $modelname\n",
    "if [ -z \"$modelname\" ]; then\n",
    "   echo \"Creating model $MODEL_NAME\"\n",
    "   gcloud ai-platform models create ${MODEL_NAME} --regions $REGION\n",
    "else\n",
    "   echo \"Model $MODEL_NAME already exists\"\n",
    "fi\n",
    "\n",
    "# delete the model version if it already exists\n",
    "modelver=$(gcloud ai-platform versions list --model \"$MODEL_NAME\" | grep -w \"$MODEL_VERSION\")\n",
    "echo $modelver\n",
    "if [ \"$modelver\" ]; then\n",
    "   echo \"Deleting version $MODEL_VERSION\"\n",
    "   yes | gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}\n",
    "   sleep 10\n",
    "fi\n",
    "\n",
    "\n",
    "echo \"Creating version $MODEL_VERSION from $MODEL_LOCATION\"\n",
    "gcloud ai-platform versions create ${MODEL_VERSION} \\\n",
    "       --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --staging-bucket gs://${BUCKET} \\\n",
    "       --runtime-version $TFVERSION"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Overwriting input.json\n"
     ]
    }
   ],
   "source": [
    "%%writefile input.json\n",
    "{\"reviews\": \"The film is based on a prize-winning novel.\"}\n",
    "{\"reviews\": \"The film is fast moving and has several great action scenes.\"}\n",
    "{\"reviews\": \"The film was very boring. I walked out half-way.\"}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "POSITIVE_REVIEW_LOGITS  POSITIVE_REVIEW_PROBABILITY\n",
      "[0.6965846419334412]    [0.6674301028251648]\n",
      "[1.6177300214767456]    [0.8344818353652954]\n",
      "[-0.754359781742096]    [0.31987208127975464]\n"
     ]
    }
   ],
   "source": [
    "!gcloud ai-platform predict --model imdb --json-instances input.json --version v1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "response = {'predictions': [{'positive_review_probability': [0.6674301028251648], 'positive_review_logits': [0.6965846419334412]}, {'positive_review_probability': [0.8344818353652954], 'positive_review_logits': [1.6177300214767456]}, {'positive_review_probability': [0.31987208127975464], 'positive_review_logits': [-0.754359781742096]}]}\n"
     ]
    }
   ],
   "source": [
    "from googleapiclient import discovery\n",
    "from oauth2client.client import GoogleCredentials\n",
    "import json\n",
    "\n",
    "credentials = GoogleCredentials.get_application_default()\n",
    "api = discovery.build(\"ml\", \"v1\", credentials = credentials,\n",
    "            discoveryServiceUrl = \"https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json\")\n",
    "\n",
    "request_data = {\"instances\":\n",
    "  [\n",
    "      {\"reviews\": \"The film is based on a prize-winning novel.\"},\n",
    "      {\"reviews\": \"The film is fast moving and has several great action scenes.\"},\n",
    "      {\"reviews\": \"The film was very boring. I walked out half-way.\"}\n",
    "  ]\n",
    "}\n",
    "\n",
     "parent = \"projects/{}/models/imdb\".format(\"ai-analytics-solutions\") # no version specified, so the default version is used\n",
    "\n",
    "response = api.projects().predict(body = request_data, name = parent).execute()\n",
    "print(\"response = {0}\".format(response))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.6674301028251648\n"
     ]
    }
   ],
   "source": [
    "print(response['predictions'][0]['positive_review_probability'][0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Stateful function\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "24\n",
      "24\n",
      "24\n",
      "24\n",
      "-6\n",
      "24\n",
      "-6\n",
      "24\n"
     ]
    }
   ],
   "source": [
    "def stateless_fn(x):\n",
    "    return 3*x + 15\n",
    "\n",
    "class Stateless:\n",
    "    def __init__(self):\n",
    "        self.weight = 3\n",
    "        self.bias = 15\n",
    "    def __call__(self, x):\n",
    "        return self.weight*x + self.bias\n",
    "\n",
    "class State:\n",
    "    def __init__(self):\n",
    "        self.counter = 0\n",
    "    def __call__(self, x):\n",
    "        self.counter = self.counter + 1\n",
    "        if self.counter % 2 == 0:\n",
    "            return 3*x + 15\n",
    "        else:\n",
    "            return 3*x - 15\n",
    "\n",
    "a1 = Stateless()\n",
    "a = State()\n",
    "print(stateless_fn(3))\n",
    "print(stateless_fn(3))\n",
    "print(a1(3))\n",
    "print(a1(3))\n",
    "print(a(3))\n",
    "print(a(3))\n",
    "print(a(3))\n",
    "print(a(3))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Online prediction\n",
    "\n",
    "Train model in BigQuery"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%bigquery\n",
    "CREATE OR REPLACE MODEL mlpatterns.neutral_3classes\n",
    "OPTIONS(model_type='logistic_reg', input_label_cols=['health']) AS\n",
    "\n",
    "SELECT \n",
    "  IF(apgar_1min = 10, 'Healthy', IF(apgar_1min >= 8, 'Neutral', 'NeedsAttention')) AS health,\n",
    "  plurality,\n",
    "  mother_age,\n",
    "  gestation_weeks,\n",
    "  ever_born\n",
    "FROM `bigquery-public-data.samples.natality`\n",
    "WHERE apgar_1min <= 10"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This works, but it is too slow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>predicted_health</th>\n",
       "      <th>predicted_health_probs</th>\n",
       "      <th>plurality</th>\n",
       "      <th>mother_age</th>\n",
       "      <th>gestation_weeks</th>\n",
       "      <th>ever_born</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Neutral</td>\n",
       "      <td>[{'prob': 0.6166873057666589, 'label': 'Neutra...</td>\n",
       "      <td>2</td>\n",
       "      <td>32</td>\n",
       "      <td>41</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "  predicted_health                             predicted_health_probs  \\\n",
       "0          Neutral  [{'prob': 0.6166873057666589, 'label': 'Neutra...   \n",
       "\n",
       "   plurality  mother_age  gestation_weeks  ever_born  \n",
       "0          2          32               41          1  "
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "%%bigquery\n",
    "SELECT * FROM ML.PREDICT(MODEL mlpatterns.neutral_3classes,\n",
    "    (SELECT \n",
    "     2 AS plurality,\n",
    "     32 AS mother_age,\n",
    "     41 AS gestation_weeks,\n",
    "     1 AS ever_born\n",
    "    )\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Better is to export the model and then deploy that ..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%bash\n",
    "BUCKET=ai-analytics-solutions-kfpdemo\n",
    "bq extract -m --destination_format=ML_TF_SAVED_MODEL mlpatterns.neutral_3classes  gs://${BUCKET}/export/baby_health"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Creating model babyhealth\n",
      "\n",
      "Creating version v1 from gs://ai-analytics-solutions-kfpdemo/export/baby_health\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Created ml engine model [projects/ai-analytics-solutions/models/babyhealth].\n",
      "Listed 0 items.\n",
      "Creating version (this might take a few minutes)......\n",
      ".......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................done.\n"
     ]
    }
   ],
   "source": [
    "%%bash\n",
    "\n",
    "TFVERSION=1.15\n",
    "REGION=us-central1\n",
    "BUCKET=ai-analytics-solutions-kfpdemo\n",
    "MODEL_LOCATION=gs://${BUCKET}/export/baby_health\n",
    "MODEL_NAME=babyhealth\n",
    "MODEL_VERSION=v1\n",
    "\n",
    "# create the model if it doesn't already exist\n",
    "modelname=$(gcloud ai-platform models list | grep -w \"$MODEL_NAME\")\n",
    "echo $modelname\n",
    "if [ -z \"$modelname\" ]; then\n",
    "   echo \"Creating model $MODEL_NAME\"\n",
    "   gcloud ai-platform models create ${MODEL_NAME} --regions $REGION\n",
    "else\n",
    "   echo \"Model $MODEL_NAME already exists\"\n",
    "fi\n",
    "\n",
    "# delete the model version if it already exists\n",
    "modelver=$(gcloud ai-platform versions list --model \"$MODEL_NAME\" | grep -w \"$MODEL_VERSION\")\n",
    "echo $modelver\n",
    "if [ \"$modelver\" ]; then\n",
    "   echo \"Deleting version $MODEL_VERSION\"\n",
    "   yes | gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}\n",
    "   sleep 10\n",
    "fi\n",
    "\n",
    "\n",
    "echo \"Creating version $MODEL_VERSION from $MODEL_LOCATION\"\n",
    "gcloud ai-platform versions create ${MODEL_VERSION} \\\n",
    "       --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --staging-bucket gs://${BUCKET} \\\n",
    "       --runtime-version $TFVERSION"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Overwriting input.json\n"
     ]
    }
   ],
   "source": [
    "%%writefile input.json\n",
    "{\"plurality\": 2, \"mother_age\": 32, \"gestation_weeks\": 41, \"ever_born\": 1}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "HEALTH_PROBS                                                     HEALTH_VALUES                                PREDICTED_HEALTH\n",
      "[0.020730846529093874, 0.6166873057666589, 0.36258184770424734]  [u'Healthy', u'Neutral', u'NeedsAttention']  [u'Neutral']\n"
     ]
    }
   ],
   "source": [
    "!gcloud ai-platform predict --model babyhealth --json-instances input.json --version v1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License"
   ]
  }
 ],
 "metadata": {
  "environment": {
   "name": "tf2-gpu.2-1.m54",
   "type": "gcloud",
   "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-1:m54"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
