{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Automated Snow Leopard Detection with Microsoft ML for Apache Spark\n",
    "\n",
    "<img src=\"https://mmlspark.blob.core.windows.net/graphics/SnowLeopardAD/SLTrust.PNG\" width=\"900\" style=\"float: right;\"/>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "# WARNING: this notebook requires a lot of memory.\n",
    "# If you get a heap space error, try dropping the number of images Bing returns,\n",
    "# or write the images out to parquet first.\n",
    "\n",
    "# The key is read from the environment. Alternatively, replace the following with\n",
    "# a line like: BING_IMAGE_SEARCH_KEY = \"hdwo2oyd3o928s.....\" (never commit real keys)\n",
    "BING_IMAGE_SEARCH_KEY = os.environ[\"BING_IMAGE_SEARCH_KEY\"] # please add your key here"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from mmlspark import *\n",
    "from mmlspark import FluentAPI\n",
    "import os\n",
    "from pyspark.sql.functions import lit\n",
    "\n",
    "def bingPhotoSearch(name, queries, pages):\n",
    "  \"\"\"Search Bing Images for each query and return a DataFrame of image URLs.\n",
    "\n",
    "  name    -- value written to the \"labels\" column for every returned row\n",
    "  queries -- list of query strings to send to Bing Image Search\n",
    "  pages   -- number of result pages per query; each page is an offset of 10\n",
    "  \"\"\"\n",
    "  # One offset per page; offsets step by the page size (10, matching setCount below).\n",
    "  offsets = [offset*10 for offset in range(0, pages)] \n",
    "  # Cartesian product: every query is fetched at every offset.\n",
    "  parameters = [(query, offset) for offset in offsets for query in queries]\n",
    "  \n",
    "  return spark.createDataFrame(parameters, (\"queries\",\"offsets\")) \\\n",
    "    .mlTransform(\n",
    "      BingImageSearch()                             # Apply Bing Image Search\n",
    "        .setSubscriptionKey(BING_IMAGE_SEARCH_KEY)  # Set the API Key\n",
    "        .setOffsetCol(\"offsets\")                    # Specify a column containing the offsets\n",
    "        .setQueryCol(\"queries\")                     # Specify a column containing the query words\n",
    "        .setCount(10)                               # Specify the number of images to return per offset\n",
    "        .setImageType(\"photo\")                      # Specify a filter to ensure we get photos\n",
    "        .setOutputCol(\"images\")) \\\n",
    "    .mlTransform(BingImageSearch.getUrlTransformer(\"images\", \"urls\")) \\\n",
    "    .withColumn(\"labels\", lit(name)) \\\n",
    "    .limit(400)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<img src=\"https://mmlspark.blob.core.windows.net/graphics/SparkSummit2/cog_services.png\" width=\"800\" style=\"float: center;\"/>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def displayDF(df, n=5, image_cols = set([\"urls\"])):\n",
    "  \"\"\"Render the first `n` rows of `df` as an HTML table.\n",
    "\n",
    "  Columns listed in `image_cols` are rendered as <img> tags; everything else\n",
    "  is shown as text. Rendering silently no-ops outside Databricks, where the\n",
    "  displayHTML builtin is not defined.\n",
    "  \"\"\"\n",
    "  rows = df.take(n)\n",
    "  cols = df.columns\n",
    "  header = \"\".join([\"<th>\" + c  + \"</th>\" for c in cols])\n",
    "  \n",
    "  style = \"\"\"\n",
    "<!DOCTYPE html>\n",
    "<html>\n",
    "<head>\n",
    "<style>\n",
    "table {\n",
    "    font-family: arial, sans-serif;\n",
    "    border-collapse: collapse;\n",
    "    width: 300px;\n",
    "}\n",
    "\n",
    "td, th {\n",
    "    border: 1px solid #dddddd;\n",
    "    text-align: left;\n",
    "    padding: 8px;\n",
    "}\n",
    "\n",
    "tr:nth-child(even) {\n",
    "    background-color: #dddddd;\n",
    "}\n",
    "</style>\n",
    "</head>\"\"\"\n",
    "  \n",
    "  table = []\n",
    "  for row in rows:\n",
    "    table.append(\"<tr>\")\n",
    "    for col in cols:\n",
    "      if col in image_cols:\n",
    "        # Fixed: the original had a stray comma between attributes\n",
    "        # ('<img src=\"{}\",  width=\"100\">'), which is invalid HTML.\n",
    "        rep = '<img src=\"{}\" width=\"100\">'.format(row[col])\n",
    "      else:\n",
    "        rep = row[col]\n",
    "      table.append(\"<td>{}</td>\".format(rep))\n",
    "    table.append(\"</tr>\")\n",
    "  tableHTML = \"\".join(table)\n",
    "  \n",
    "  body = \"\"\"\n",
    "<body>\n",
    "<table>\n",
    "  <tr>\n",
    "    {} \n",
    "  </tr>\n",
    "  {}\n",
    "</table>\n",
    "</body>\n",
    "</html>\n",
    "  \"\"\".format(header, tableHTML)\n",
    "  try:\n",
    "    displayHTML(style + body)\n",
    "  except NameError:\n",
    "    # displayHTML only exists on Databricks; skip rendering elsewhere, but\n",
    "    # no longer swallow every other exception (the original bare except did).\n",
    "    pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 100 pages x 10 images/page per query; bingPhotoSearch caps the result at 400 rows.\n",
    "snowLeopardQueries = [\"snow leopard\"]\n",
    "snowLeopardUrls = bingPhotoSearch(\"snow leopard\", snowLeopardQueries, pages=100)\n",
    "displayDF(snowLeopardUrls)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Random words from public blob storage serve as queries for the negative class.\n",
    "randomWords = spark.read.parquet(\"wasb://publicwasb@mmlspark.blob.core.windows.net/random_words.parquet\").cache()\n",
    "randomWords.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Negative examples: search Bing for random words and label every hit \"other\".\n",
    "# Note: the label column is named \"labels\" to match snowLeopardUrls and the\n",
    "# StringIndexer(inputCol=\"labels\") in the training pipeline (the original used\n",
    "# \"label\", which only worked because DataFrame.union matches by position).\n",
    "randomLinks = randomWords \\\n",
    "  .mlTransform(BingImageSearch()\n",
    "    .setSubscriptionKey(BING_IMAGE_SEARCH_KEY)\n",
    "    .setCount(10)\n",
    "    .setQueryCol(\"words\")\n",
    "    .setOutputCol(\"images\")) \\\n",
    "  .mlTransform(BingImageSearch.getUrlTransformer(\"images\", \"urls\")) \\\n",
    "  .withColumn(\"labels\", lit(\"other\")) \\\n",
    "  .limit(400)\n",
    "  \n",
    "displayDF(randomLinks)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): union() matches columns by position, not name -- this assumes both\n",
    "# frames share the same column layout after getUrlTransformer; verify if schemas change.\n",
    "images = snowLeopardUrls.union(randomLinks).repartition(100)\\\n",
    "  .mlTransform(BingImageSearch.downloadFromUrls(\"urls\", \"image\", concurrency=5, timeout=5000))\\\n",
    "  .dropna()\n",
    "\n",
    "# Fixed seed keeps the train/test split reproducible across runs.\n",
    "train, test = images.randomSplit([.7,.3], seed=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.ml import Pipeline\n",
    "from pyspark.ml.feature import StringIndexer\n",
    "from pyspark.ml.classification import LogisticRegression\n",
    "from pyspark.sql.functions import udf\n",
    "# Explicit import: DoubleType is used below but previously came in only via\n",
    "# the wildcard mmlspark import, which hid the pyspark.sql.types dependency.\n",
    "from pyspark.sql.types import DoubleType\n",
    "\n",
    "def getIndex(row):\n",
    "  \"\"\"Return element 1 of a probability vector as a float.\n",
    "\n",
    "  Presumably index 1 is the snow leopard class -- this depends on\n",
    "  StringIndexer's frequency-based label ordering; confirm on real data.\n",
    "  \"\"\"\n",
    "  return float(row[1])\n",
    "\n",
    "# Try a local model cache first; fall back to DBFS when running on Databricks.\n",
    "try:\n",
    "  network = ModelDownloader(spark, \"Models/\").downloadByName(\"ResNet50\")\n",
    "except Exception:\n",
    "  network = ModelDownloader(spark, \"dbfs:/Models/\").downloadByName(\"ResNet50\")\n",
    "\n",
    "model = Pipeline(stages=[\n",
    "  StringIndexer(inputCol = \"labels\", outputCol=\"index\"),\n",
    "  # Featurize with pre-trained ResNet50, cutting the last 2 output layers.\n",
    "  ImageFeaturizer(inputCol=\"image\", outputCol=\"features\", cutOutputLayers=2).setModel(network),\n",
    "  LogisticRegression(maxIter=5, labelCol=\"index\", regParam=5.0),\n",
    "  UDFTransformer()\\\n",
    "      .setUDF(udf(getIndex, DoubleType()))\\\n",
    "      .setInputCol(\"probability\")\\\n",
    "      .setOutputCol(\"leopard_prob\")\n",
    "])\n",
    "\n",
    "fitModel = model.fit(train)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<img src=\"https://mmlspark.blob.core.windows.net/graphics/SnowLeopardAD/SLPipeline.PNG\" width=\"900\" style=\"float: right;\"/>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def plotConfusionMatrix(df, label, prediction, classLabels):\n",
    "  \"\"\"Plot a confusion matrix comparing the `label` and `prediction` columns of `df`.\"\"\"\n",
    "  from mmlspark.plot import confusionMatrix\n",
    "  import matplotlib.pyplot as plt\n",
    "  fig = plt.figure(figsize=(4.5, 4.5))\n",
    "  confusionMatrix(df, label, prediction, classLabels)\n",
    "  display(fig)\n",
    "\n",
    "# stages[0] is the fitted StringIndexer, whose .labels gives the class names.\n",
    "plotConfusionMatrix(fitModel.transform(test), \"index\", \"prediction\", fitModel.stages[0].labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import urllib.request\n",
    "# Download a single held-out test image and wrap its bytes in a one-row DataFrame.\n",
    "test_image_url = \"https://mmlspark.blob.core.windows.net/graphics/SnowLeopardAD/snow_leopard1.jpg\"\n",
    "with urllib.request.urlopen(test_image_url) as url:\n",
    "    barr = url.read()\n",
    "test_subsample = spark.createDataFrame([(bytearray(barr),)], [\"image\"])\n",
    "\n",
    "# Explain the fitted model's \"leopard_prob\" output with LIME over image superpixels.\n",
    "lime = ImageLIME()\\\n",
    "  .setModel(fitModel)\\\n",
    "  .setPredictionCol(\"leopard_prob\")\\\n",
    "  .setOutputCol(\"weights\")\\\n",
    "  .setInputCol(\"image\")\\\n",
    "  .setCellSize(100.0)\\\n",
    "  .setModifier(50.0)\\\n",
    "  .setNSamples(300)\n",
    "\n",
    "result = lime.transform(test_subsample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np  # fixed: np is used below but was never imported in this notebook\n",
    "import PIL\n",
    "import io\n",
    "\n",
    "def plot_superpixels(row):\n",
    "    \"\"\"Overlay the highest-weighted LIME superpixels on the image in `row`.\n",
    "\n",
    "    Expects the row to carry 'image' (raw image bytes), 'superpixels'\n",
    "    (cluster pixel coordinate lists), and 'weights' (one LIME weight per cluster).\n",
    "    \"\"\"\n",
    "    image_bytes = row['image']\n",
    "    superpixels = row['superpixels']['clusters']\n",
    "    weights = list(row['weights'])\n",
    "    # Highlight only clusters above the 90th percentile of weights\n",
    "    # (renamed from 'mean_weight' -- it is a percentile, not a mean).\n",
    "    top_weight = np.percentile(weights, 90)\n",
    "    img = (PIL.Image.open(io.BytesIO(image_bytes))).convert('RGBA')\n",
    "    image_array = np.asarray(img).copy()\n",
    "    for (sp, w) in zip(superpixels, weights):\n",
    "        if w > top_weight:\n",
    "            for (x, y) in sp:\n",
    "                # Tint the pixel green (G=255) and slightly transparent (A=200).\n",
    "                image_array[y, x, 1] = 255\n",
    "                image_array[y, x, 3] = 200\n",
    "    plt.clf()\n",
    "    plt.imshow(image_array)\n",
    "    display()\n",
    "\n",
    "# Gets first row from the LIME-transformed data frame\n",
    "plot_superpixels(result.take(1)[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Your results will look like:\n",
    "<img src=\"https://mmlspark.blob.core.windows.net/graphics/SnowLeopardAD/lime_results.png\" width=\"900\" style=\"float: right;\"/>"
   ]
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [default]",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
