{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PySpark Cookbook\n",
    "\n",
    "### Tomasz Drabas, Denny Lee\n",
    "#### Version: 0.1\n",
    "#### Date: 3/10/2018"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Loading the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Starting Spark application\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<table>\n",
       "<tr><th>ID</th><th>YARN Application ID</th><th>Kind</th><th>State</th><th>Spark UI</th><th>Driver log</th><th>Current session?</th></tr><tr><td>0</td><td>None</td><td>pyspark</td><td>idle</td><td></td><td></td><td>✔</td></tr></table>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SparkSession available as 'spark'.\n"
     ]
    }
   ],
   "source": [
    "forest_path = '../data/forest_coverage_type.csv'\n",
    "\n",
    "forest = spark.read.csv(\n",
    "    forest_path\n",
    "    , header=True\n",
    "    , inferSchema=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- Elevation: integer (nullable = true)\n",
      " |-- Aspect: integer (nullable = true)\n",
      " |-- Slope: integer (nullable = true)\n",
      " |-- Horizontal_Distance_To_Hydrology: integer (nullable = true)\n",
      " |-- Vertical_Distance_To_Hydrology: integer (nullable = true)\n",
      " |-- Horizontal_Distance_To_Roadways: integer (nullable = true)\n",
      " |-- Hillshade_9am: integer (nullable = true)\n",
      " |-- Hillshade_Noon: integer (nullable = true)\n",
      " |-- Hillshade_3pm: integer (nullable = true)\n",
      " |-- Horizontal_Distance_To_Fire_Points: integer (nullable = true)\n",
      " |-- Wilderness_Area_Rawah: integer (nullable = true)\n",
      " |-- Wilderness_Area_Neota: integer (nullable = true)\n",
      " |-- Wilderness_Area_Comanche: integer (nullable = true)\n",
      " |-- Wilderness_Area_CacheLaPoudre: integer (nullable = true)\n",
      " |-- Soil_type_2702: integer (nullable = true)\n",
      " |-- Soil_type_2703: integer (nullable = true)\n",
      " |-- Soil_type_2704: integer (nullable = true)\n",
      " |-- Soil_type_2705: integer (nullable = true)\n",
      " |-- Soil_type_2706: integer (nullable = true)\n",
      " |-- Soil_type_2717: integer (nullable = true)\n",
      " |-- Soil_type_3501: integer (nullable = true)\n",
      " |-- Soil_type_3502: integer (nullable = true)\n",
      " |-- Soil_type_4201: integer (nullable = true)\n",
      " |-- Soil_type_4703: integer (nullable = true)\n",
      " |-- Soil_type_4704: integer (nullable = true)\n",
      " |-- Soil_type_4744: integer (nullable = true)\n",
      " |-- Soil_type_4758: integer (nullable = true)\n",
      " |-- Soil_type_5101: integer (nullable = true)\n",
      " |-- Soil_type_5151: integer (nullable = true)\n",
      " |-- Soil_type_6101: integer (nullable = true)\n",
      " |-- Soil_type_6102: integer (nullable = true)\n",
      " |-- Soil_type_6731: integer (nullable = true)\n",
      " |-- Soil_type_7101: integer (nullable = true)\n",
      " |-- Soil_type_7102: integer (nullable = true)\n",
      " |-- Soil_type_7103: integer (nullable = true)\n",
      " |-- Soil_type_7201: integer (nullable = true)\n",
      " |-- Soil_type_7202: integer (nullable = true)\n",
      " |-- Soil_type_7700: integer (nullable = true)\n",
      " |-- Soil_type_7701: integer (nullable = true)\n",
      " |-- Soil_type_7702: integer (nullable = true)\n",
      " |-- Soil_type_7709: integer (nullable = true)\n",
      " |-- Soil_type_7710: integer (nullable = true)\n",
      " |-- Soil_type_7745: integer (nullable = true)\n",
      " |-- Soil_type_7746: integer (nullable = true)\n",
      " |-- Soil_type_7755: integer (nullable = true)\n",
      " |-- Soil_type_7756: integer (nullable = true)\n",
      " |-- Soil_type_7757: integer (nullable = true)\n",
      " |-- Soil_type_7790: integer (nullable = true)\n",
      " |-- Soil_type_8703: integer (nullable = true)\n",
      " |-- Soil_type_8707: integer (nullable = true)\n",
      " |-- Soil_type_8708: integer (nullable = true)\n",
      " |-- Soil_type_8771: integer (nullable = true)\n",
      " |-- Soil_type_8772: integer (nullable = true)\n",
      " |-- Soil_type_8776: integer (nullable = true)\n",
      " |-- CoverType: integer (nullable = true)"
     ]
    }
   ],
   "source": [
    "forest.printSchema()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Introducing Transformers"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "List of most popular **Transformers**\n",
    "* Binarizer\n",
    "* Bucketizer\n",
    "* ChiSqSelector\n",
    "* CountVectorizer\n",
    "* DCT\n",
    "* ElementwiseProduct\n",
    "* HashingTF\n",
    "* IDF\n",
    "* IndexToString\n",
    "* MaxAbsScaler\n",
    "* MinMaxScaler\n",
    "* NGram\n",
    "* Normalizer\n",
    "* OneHotEncoder\n",
    "* PCA\n",
    "* PolynomialExpansion\n",
    "* QuantileDiscretizer\n",
    "* RegexTokenizer\n",
    "* RFormula\n",
    "* SQLTransformer\n",
    "* StandardScaler\n",
    "* StopWordsRemover\n",
    "* StringIndexer\n",
    "* Tokenizer\n",
    "* VectorAssembler\n",
    "* VectorIndexer\n",
    "* VectorSlicer\n",
    "* Word2Vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pyspark.sql.functions as f\n",
    "import pyspark.ml.feature as feat\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Bucketize"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------------------+------------------------------------+\n",
      "|Horizontal_Distance_To_Hydrology|Horizontal_Distance_To_Hydrology_Bkt|\n",
      "+--------------------------------+------------------------------------+\n",
      "|                             258|                                 2.0|\n",
      "|                             212|                                 1.0|\n",
      "|                             268|                                 2.0|\n",
      "|                             242|                                 1.0|\n",
      "|                             153|                                 1.0|\n",
      "+--------------------------------+------------------------------------+\n",
      "only showing top 5 rows"
     ]
    }
   ],
   "source": [
    "buckets_no = 10\n",
    "\n",
    "dist_min_max = (\n",
    "    forest.agg(\n",
    "          f.min('Horizontal_Distance_To_Hydrology')\n",
    "            .alias('min')\n",
    "        , f.max('Horizontal_Distance_To_Hydrology')\n",
    "            .alias('max')\n",
    "    )\n",
    "    .rdd\n",
    "    .map(lambda row: (row.min, row.max))\n",
    "    .collect()[0]\n",
    ")\n",
    "\n",
    "rng = dist_min_max[1] - dist_min_max[0]\n",
    "\n",
    "splits = list(np.arange(\n",
    "    dist_min_max[0]\n",
    "    , dist_min_max[1]\n",
    "    , rng / (buckets_no + 1)))\n",
    "\n",
    "bucketizer = feat.Bucketizer(\n",
    "    splits=splits\n",
    "    , inputCol= 'Horizontal_Distance_To_Hydrology'\n",
    "    , outputCol='Horizontal_Distance_To_Hydrology_Bkt'\n",
    ")\n",
    "\n",
    "(\n",
    "    bucketizer\n",
    "    .transform(forest)\n",
    "    .select(\n",
    "         'Horizontal_Distance_To_Hydrology'\n",
    "        ,'Horizontal_Distance_To_Hydrology_Bkt'\n",
    "    ).show(5)\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Principal Components Analysis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(feat=SparseVector(55, {0: 2596.0, 1: 51.0, 2: 3.0, 3: 258.0, 5: 510.0, 6: 221.0, 7: 232.0, 8: 148.0, 9: 6279.0, 10: 1.0, 42: 1.0, 54: 5.0}), pca_feat=DenseVector([-3887.7711, 4996.8103, 2323.0932, 1014.5873, -135.1702]))]"
     ]
    }
   ],
   "source": [
    "vectorAssembler = (\n",
    "    feat.VectorAssembler(\n",
    "        inputCols=forest.columns, \n",
    "        outputCol='feat'\n",
    "    )\n",
    ")\n",
    "\n",
    "pca = (\n",
    "    feat.PCA(\n",
    "        k=5\n",
    "        , inputCol=vectorAssembler.getOutputCol()\n",
    "        , outputCol='pca_feat'\n",
    "    )\n",
    ")\n",
    "\n",
    "(\n",
    "    pca\n",
    "    .fit(vectorAssembler.transform(forest))\n",
    "    .transform(vectorAssembler.transform(forest))\n",
    "    .select('feat','pca_feat')\n",
    "    .take(1)\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Introducing Estimators"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "List of most popular **Estimators**\n",
    "1. Classification\n",
    " * LinearSVC\n",
    " * LogisticRegression \n",
    " * DecisionTreeClassifier\n",
    " * GBTClassifier\n",
    " * RandomForestClassifier\n",
    " * NaiveBayes\n",
    " * MultilayerPerceptronClassifier\n",
    " * OneVsRest\n",
    "2. Regression\n",
    " * AFTSurvivalRegression\n",
    " * DecisionTreeRegressor\n",
    " * GBTRegressor\n",
    " * GeneralizedLinearRegression\n",
    " * IsotonicRegression\n",
    " * LinearRegression\n",
    " * RandomForestRegressor\n",
    "3. Clustering\n",
    " * BisectingKMeans\n",
     " * KMeans\n",
    " * GaussianMixture\n",
    " * LDA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---------+------+\n",
      "|CoverType| count|\n",
      "+---------+------+\n",
      "|        1|211840|\n",
      "|        6| 17367|\n",
      "|        3| 35754|\n",
      "|        5|  9493|\n",
      "|        4|  2747|\n",
      "|        7| 20510|\n",
      "|        2|283301|\n",
      "+---------+------+"
     ]
    }
   ],
   "source": [
    "forest.select('CoverType').groupBy('CoverType').count().show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Linear SVM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DenseVector([-0.0001, -0.0, -0.0023, -0.0, -0.0001, 0.0, -0.001, -0.0017, -0.0003, -0.0, 0.0, 0.0401, -0.0071, -0.0958, -0.0901, -0.0653, -0.0655, -0.0437, -0.0928, -0.0848, -0.0211, -0.0045, -0.0498, -0.0829, -0.0522, -0.0325, -0.0263, -0.0923, -0.0889, -0.0275, -0.0606, -0.0595, 0.0341, -0.003, 0.0822, 0.0607, 0.0351, 0.0093, 0.0048, -0.0154, 0.0422, -0.0673, -0.0039, -0.0142, 0.0036, 0.0078, 0.0, -0.0117, 0.0283, -0.0002, -0.0463, 0.0394, 0.0292, 0.0358])"
     ]
    }
   ],
   "source": [
    "import pyspark.ml.classification as cl\n",
    "\n",
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[0:-1]\n",
    "    , outputCol='features')\n",
    "\n",
    "fir_dataset = (\n",
    "    vectorAssembler\n",
    "    .transform(forest)\n",
    "    .withColumn(\n",
    "        'label'\n",
    "        , (f.col('CoverType') == 1).cast('integer'))\n",
    "    .select('label', 'features')\n",
    ")\n",
    "\n",
    "svc_obj = cl.LinearSVC(maxIter=10, regParam=0.01)\n",
    "svc_model = svc_obj.fit(fir_dataset)\n",
    "\n",
    "svc_model.coefficients"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Linear Regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DenseVector([0.0309, 0.6522, 0.1911, 0.1424, 0.0342, 0.7402, 1.053, -0.0017, -0.0041, 2.7163, 189.0362, 27.8238, -265.8505, -407.4379, -346.0612, -364.3841, -302.6788, -400.5852, -212.9918, -126.1329, -117.7423, -312.0478, -248.7118, -221.4788, -155.1459, -84.5129, -398.0433, -387.8102, -179.4485, -261.3875, -337.7875, 48.0629, -94.7813, 149.8043, 135.144, 80.0901, 64.3659, 124.0233, -115.0126, 119.1285, -181.7498, 10.8056, -42.7849, 65.5441, 102.2562, 36.9865, -48.1163, 379.2091, 256.0169, 497.1714, 313.0607, 337.172, 397.0758, -14.4551])"
     ]
    }
   ],
   "source": [
    "import pyspark.ml.regression as rg\n",
    "\n",
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[1:]\n",
    "    , outputCol='features')\n",
    "\n",
    "elevation_dataset = (\n",
    "    vectorAssembler\n",
    "    .transform(forest)\n",
    "    .withColumn(\n",
    "        'label'\n",
    "        , f.col('Elevation').cast('float'))\n",
    "    .select('label', 'features')\n",
    ")\n",
    "    \n",
    "lr_obj = rg.LinearRegression(\n",
    "    maxIter=10\n",
    "    , regParam=0.01\n",
    "    , elasticNetParam=1.00)\n",
    "lr_model = lr_obj.fit(elevation_dataset)\n",
    "\n",
    "lr_model.coefficients"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.7860412464754236 129.50871925702438 103.34079732698483"
     ]
    }
   ],
   "source": [
    "summary = lr_model.summary\n",
    "\n",
    "print(\n",
    "    summary.r2\n",
    "    , summary.rootMeanSquaredError\n",
    "    , summary.meanAbsoluteError\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Introducing Pipelines"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---------+------------------+\n",
      "|Elevation|        prediction|\n",
      "+---------+------------------+\n",
      "|     2596|2840.7801831411316|\n",
      "|     2590|2828.7464246669683|\n",
      "|     2804| 2842.761272955131|\n",
      "|     2785| 2966.057500325109|\n",
      "|     2595|2817.1687155114637|\n",
      "+---------+------------------+\n",
      "only showing top 5 rows"
     ]
    }
   ],
   "source": [
    "from pyspark.ml import Pipeline\n",
    "\n",
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[1:]\n",
    "    , outputCol='features')\n",
    "\n",
    "lr_obj = rg.GeneralizedLinearRegression(\n",
    "    labelCol='Elevation'\n",
    "    , maxIter=10\n",
    "    , regParam=0.01\n",
    "    , link='identity'\n",
    "    , linkPredictionCol=\"p\"\n",
    ")\n",
    "\n",
    "pip = Pipeline(stages=[vectorAssembler, lr_obj])\n",
    "\n",
    "(\n",
    "    pip\n",
    "    .fit(forest)\n",
    "    .transform(forest)\n",
    "    .select('Elevation', 'prediction')\n",
    "    .show(5)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "transformed_df = forest.select('Elevation')\n",
    "transformed_df.toPandas().hist()\n",
    "\n",
    "plt.savefig('Elevation_histogram.png')\n",
    "\n",
    "plt.close('all')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Selecting the most predictable features"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Chi-Square selector"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+\n",
      "|            selected|\n",
      "+--------------------+\n",
      "|(10,[0,1,2,3,5,6,...|\n",
      "|(10,[0,1,2,3,4,5,...|\n",
      "|(10,[0,1,2,3,4,5,...|\n",
      "|(10,[0,1,2,3,4,5,...|\n",
      "|(10,[0,1,2,3,4,5,...|\n",
      "+--------------------+\n",
      "only showing top 5 rows"
     ]
    }
   ],
   "source": [
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[0:-1]\n",
    "    , outputCol='features'\n",
    ")\n",
    "\n",
    "selector = feat.ChiSqSelector(\n",
    "    labelCol='CoverType'\n",
    "    , numTopFeatures=10\n",
    "    , outputCol='selected')\n",
    "\n",
    "pipeline_sel = Pipeline(stages=[vectorAssembler, selector])\n",
    "\n",
    "(\n",
    "    pipeline_sel\n",
    "    .fit(forest)\n",
    "    .transform(forest)\n",
    "    .select(selector.getOutputCol())\n",
    "    .show(5)\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Correlation matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DenseMatrix([[ 1.        ,  0.01573494, -0.24269664, ...,  0.19359464,\n",
      "               0.21261232, -0.26955378],\n",
      "             [ 0.01573494,  1.        ,  0.07872841, ...,  0.00829428,\n",
      "              -0.00586558,  0.0170798 ],\n",
      "             [-0.24269664,  0.07872841,  1.        , ...,  0.09360193,\n",
      "               0.02563691,  0.14828541],\n",
      "             ...,\n",
      "             [ 0.19359464,  0.00829428,  0.09360193, ...,  1.        ,\n",
      "              -0.01929168,  0.15566826],\n",
      "             [ 0.21261232, -0.00586558,  0.02563691, ..., -0.01929168,\n",
      "               1.        ,  0.1283513 ],\n",
      "             [-0.26955378,  0.0170798 ,  0.14828541, ...,  0.15566826,\n",
      "               0.1283513 ,  1.        ]])"
     ]
    }
   ],
   "source": [
    "import pyspark.ml.stat as st\n",
    "\n",
    "features_and_label = feat.VectorAssembler(\n",
    "    inputCols=forest.columns\n",
    "    , outputCol='features'\n",
    ")\n",
    "\n",
    "corr = st.Correlation.corr(\n",
    "    features_and_label.transform(forest), \n",
    "    'features', \n",
    "    'pearson'\n",
    ")\n",
    "\n",
    "print(str(corr.collect()[0][0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "array(['Wilderness_Area_CacheLaPoudre', 'Soil_type_4703',\n",
      "       'Horizontal_Distance_To_Roadways',\n",
      "       'Horizontal_Distance_To_Hydrology', 'CoverType', 'Slope',\n",
      "       'Wilderness_Area_Neota', 'Soil_type_8771', 'Soil_type_2717',\n",
      "       'Soil_type_8776'], dtype='<U34')"
     ]
    }
   ],
   "source": [
    "num_of_features = 10\n",
    "cols = dict([\n",
    "    (i, e) \n",
    "    for i, e \n",
    "    in enumerate(forest.columns)\n",
    "])\n",
    "\n",
    "corr_matrix = corr.collect()[0][0]\n",
    "label_corr_with_idx = [\n",
    "    (i[0], e) \n",
    "    for i, e \n",
    "    in np.ndenumerate(corr_matrix.toArray()[:,0])\n",
    "][1:]\n",
    "\n",
    "label_corr_with_idx_sorted = sorted(\n",
    "    label_corr_with_idx\n",
    "    , key=lambda el: -abs(el[1])\n",
    ")\n",
    "\n",
    "features_selected = np.array([\n",
    "    cols[el[0]] \n",
    "    for el \n",
    "    in label_corr_with_idx_sorted\n",
    "])[0:num_of_features]\n",
    "\n",
    "features_selected"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Predicting forest coverage type"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Logistic regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [],
   "source": [
    "forest_train, forest_test = (\n",
    "    forest\n",
    "    .randomSplit([0.7, 0.3], seed=666)\n",
    ")\n",
    "\n",
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[0:-1]\n",
    "    , outputCol='features'\n",
    ")\n",
    "\n",
    "selector = feat.ChiSqSelector(\n",
    "    labelCol='CoverType'\n",
    "    , numTopFeatures=10\n",
    "    , outputCol='selected'\n",
    ")\n",
    "\n",
    "logReg_obj = cl.LogisticRegression(\n",
    "    labelCol='CoverType'\n",
    "    , featuresCol=selector.getOutputCol()\n",
    "    , regParam=0.01\n",
    "    , elasticNetParam=1.0\n",
    "    , family='multinomial'\n",
    ")\n",
    "\n",
    "pipeline = Pipeline(\n",
    "    stages=[\n",
    "        vectorAssembler\n",
    "        , selector\n",
    "        , logReg_obj\n",
    "    ])\n",
    "\n",
    "pModel = pipeline.fit(forest_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(0.6638467009427569, 0.6632784396900246, 0.691296432850954)"
     ]
    }
   ],
   "source": [
    "import pyspark.ml.evaluation as ev\n",
    "\n",
    "results_logReg = (\n",
    "    pModel\n",
    "    .transform(forest_test)\n",
    "    .select('CoverType', 'probability', 'prediction')\n",
    ")\n",
    "\n",
    "evaluator = ev.MulticlassClassificationEvaluator(\n",
    "    predictionCol='prediction'\n",
    "    , labelCol='CoverType')\n",
    "\n",
    "(\n",
    "    evaluator.evaluate(results_logReg)\n",
    "    , evaluator.evaluate(\n",
    "        results_logReg\n",
    "        , {evaluator.metricName: 'weightedPrecision'}\n",
    "    ) \n",
    "    , evaluator.evaluate(\n",
    "        results_logReg\n",
    "        , {evaluator.metricName: 'accuracy'}\n",
    "    )\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Random Forest classifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(0.6638467009427569, 0.6632784396900246, 0.691296432850954)"
     ]
    }
   ],
   "source": [
    "rf_obj = cl.RandomForestClassifier(\n",
    "    labelCol='CoverType'\n",
    "    , featuresCol=selector.getOutputCol()\n",
    "    , minInstancesPerNode=10\n",
    "    , numTrees=10\n",
    ")\n",
    "\n",
    "pipeline = Pipeline(\n",
    "    stages=[vectorAssembler, selector, rf_obj]\n",
    ")\n",
    "\n",
    "pModel = pipeline.fit(forest_train)\n",
    "\n",
    "results_rf = (\n",
    "    pModel\n",
    "    .transform(forest_test)\n",
    "    .select('CoverType', 'probability', 'prediction')\n",
    ")\n",
    "\n",
    "evaluator = ev.MulticlassClassificationEvaluator(\n",
    "    predictionCol='prediction'\n",
    "    , labelCol='CoverType')\n",
    "\n",
    "(\n",
    "    evaluator.evaluate(results_rf)\n",
    "    , evaluator.evaluate(\n",
    "        results_rf\n",
    "        , {evaluator.metricName: 'weightedPrecision'}\n",
    "    )\n",
    "    , evaluator.evaluate(\n",
    "        results_rf\n",
    "        , {evaluator.metricName: 'accuracy'}\n",
    "    )\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Estimating forest elevation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Random Forest regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8264236722093034"
     ]
    }
   ],
   "source": [
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[1:]\n",
    "    , outputCol='features')\n",
    "\n",
    "rf_obj = rg.RandomForestRegressor(\n",
    "    labelCol='Elevation'\n",
    "    , maxDepth=10\n",
    "    , minInstancesPerNode=10\n",
    "    , minInfoGain=0.1\n",
    "    , numTrees=10\n",
    ")\n",
    "\n",
    "pip = Pipeline(stages=[vectorAssembler, rf_obj])\n",
    "\n",
    "results = (\n",
    "    pip\n",
    "    .fit(forest)\n",
    "    .transform(forest)\n",
    "    .select('Elevation', 'prediction')\n",
    ")\n",
    "\n",
    "evaluator = ev.RegressionEvaluator(labelCol='Elevation')\n",
    "evaluator.evaluate(results, {evaluator.metricName: 'r2'})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Gradient Boosted Trees regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.833598109692272"
     ]
    }
   ],
   "source": [
    "gbt_obj = rg.GBTRegressor(\n",
    "    labelCol='Elevation'\n",
    "    , minInstancesPerNode=10\n",
    "    , minInfoGain=0.1\n",
    ")\n",
    "\n",
    "pip = Pipeline(stages=[vectorAssembler, gbt_obj])\n",
    "\n",
    "results = (\n",
    "    pip\n",
    "    .fit(forest)\n",
    "    .transform(forest)\n",
    "    .select('Elevation', 'prediction')\n",
    ")\n",
    "\n",
    "evaluator = ev.RegressionEvaluator(labelCol='Elevation')\n",
    "evaluator.evaluate(results, {evaluator.metricName: 'r2'})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Clustering forest cover type"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pyspark.ml.clustering as clust\n",
    "\n",
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[:-1]\n",
    "    , outputCol='features')\n",
    "\n",
    "kmeans_obj = clust.KMeans(k=7, seed=666)\n",
    "\n",
    "pip = Pipeline(stages=[vectorAssembler, kmeans_obj])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+---------+----------+\n",
      "|            features|CoverType|prediction|\n",
      "+--------------------+---------+----------+\n",
      "|(54,[0,1,2,3,5,6,...|        5|         1|\n",
      "|(54,[0,1,2,3,4,5,...|        5|         1|\n",
      "|(54,[0,1,2,3,4,5,...|        2|         1|\n",
      "|(54,[0,1,2,3,4,5,...|        2|         1|\n",
      "|(54,[0,1,2,3,4,5,...|        5|         1|\n",
      "+--------------------+---------+----------+\n",
      "only showing top 5 rows"
     ]
    }
   ],
   "source": [
    "results = (\n",
    "    pip\n",
    "    .fit(forest)\n",
    "    .transform(forest)\n",
    "    .select('features', 'CoverType', 'prediction')\n",
    ")\n",
    "\n",
    "results.show(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.4999826131644061"
     ]
    }
   ],
   "source": [
    "clustering_ev = ev.ClusteringEvaluator()\n",
    "clustering_ev.evaluate(results)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Tuning hyper parameters"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Grid search"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pyspark.ml.tuning as tune\n",
    "\n",
    "vectorAssembler = feat.VectorAssembler(\n",
    "    inputCols=forest.columns[0:-1]\n",
    "    , outputCol='features')\n",
    "\n",
    "selector = feat.ChiSqSelector(\n",
    "    labelCol='CoverType'\n",
    "    , numTopFeatures=5\n",
    "    , outputCol='selected')\n",
    "\n",
    "logReg_obj = cl.LogisticRegression(\n",
    "    labelCol='CoverType'\n",
    "    , featuresCol=selector.getOutputCol()\n",
    "    , family='multinomial'\n",
    ")\n",
    "\n",
    "logReg_grid = (\n",
    "    tune.ParamGridBuilder()\n",
    "    .addGrid(logReg_obj.regParam\n",
    "            , [0.01, 0.1]\n",
    "        )\n",
    "    .addGrid(logReg_obj.elasticNetParam\n",
    "            , [1.0, 0.5]\n",
    "        )\n",
    "    .build()\n",
    ")\n",
    "\n",
    "logReg_ev = ev.MulticlassClassificationEvaluator(\n",
    "    predictionCol='prediction'\n",
    "    , labelCol='CoverType')\n",
    "\n",
    "cross_v = tune.CrossValidator(\n",
    "    estimator=logReg_obj\n",
    "    , estimatorParamMaps=logReg_grid\n",
    "    , evaluator=logReg_ev\n",
    ")\n",
    "\n",
    "pipeline = Pipeline(stages=[vectorAssembler, selector])\n",
    "data_trans = pipeline.fit(forest_train)\n",
    "\n",
    "logReg_modelTest = cross_v.fit(\n",
    "    data_trans.transform(forest_train)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.6024281861281453\n",
      "0.6602048575905612\n",
      "0.6602048575905614"
     ]
    }
   ],
   "source": [
    "data_trans_test = data_trans.transform(forest_test)\n",
    "results = logReg_modelTest.transform(data_trans_test)\n",
    "\n",
    "print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedPrecision'}))\n",
    "print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedRecall'}))\n",
    "print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'accuracy'}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Train-validation splitting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.6024281861281453\n",
      "0.6602048575905612\n",
      "0.6602048575905614"
     ]
    }
   ],
   "source": [
    "train_v = tune.TrainValidationSplit(\n",
    "    estimator=logReg_obj\n",
    "    , estimatorParamMaps=logReg_grid\n",
    "    , evaluator=logReg_ev\n",
    "    , parallelism=4\n",
    ")\n",
    "\n",
    "logReg_modelTrainV = (\n",
    "    train_v\n",
     "    .fit(data_trans.transform(forest_train))\n",
     ")\n",
     "\n",
    "results = logReg_modelTrainV.transform(data_trans_test)\n",
    "\n",
    "print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedPrecision'}))\n",
    "print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'weightedRecall'}))\n",
    "print(logReg_ev.evaluate(results, {logReg_ev.metricName: 'accuracy'}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Feature engineering - NLP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "some_text = spark.createDataFrame([\n",
    "    ['''\n",
    "    Apache Spark achieves high performance for both batch\n",
    "    and streaming data, using a state-of-the-art DAG scheduler, \n",
    "    a query optimizer, and a physical execution engine.\n",
    "    ''']\n",
    "    , ['''\n",
    "    Apache Spark is a fast and general-purpose cluster computing \n",
    "    system. It provides high-level APIs in Java, Scala, Python \n",
    "    and R, and an optimized engine that supports general execution \n",
    "    graphs. It also supports a rich set of higher-level tools including \n",
    "    Spark SQL for SQL and structured data processing, MLlib for machine \n",
    "    learning, GraphX for graph processing, and Spark Streaming.\n",
    "    ''']\n",
    "    , ['''\n",
    "    Machine learning is a field of computer science that often uses \n",
    "    statistical techniques to give computers the ability to \"learn\" \n",
    "    (i.e., progressively improve performance on a specific task) \n",
    "    with data, without being explicitly programmed.\n",
    "    ''']\n",
    "], ['text'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(text_split=['apache', 'spark', 'achieves', 'high', 'performance', 'for', 'both', 'batch', 'and', 'streaming', 'data', 'using', 'a', 'state-of-the-art', 'dag', 'scheduler', 'a', 'query', 'optimizer', 'and', 'a', 'physical', 'execution', 'engine'])]"
     ]
    }
   ],
   "source": [
    "splitter = feat.RegexTokenizer(\n",
    "    inputCol='text'\n",
    "    , outputCol='text_split'\n",
    "    , pattern='\\s+|[,.\\\"]'\n",
    ")\n",
    "\n",
    "splitter.transform(some_text).select('text_split').take(1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Stop-words removal"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(no_stopWords=['apache', 'spark', 'achieves', 'high', 'performance', 'batch', 'streaming', 'data', 'using', 'state-of-the-art', 'dag', 'scheduler', 'query', 'optimizer', 'physical', 'execution', 'engine'])]"
     ]
    }
   ],
   "source": [
    "sw_remover = feat.StopWordsRemover(\n",
    "    inputCol=splitter.getOutputCol()\n",
    "    , outputCol='no_stopWords'\n",
    ")\n",
    "\n",
    "sw_remover.transform(splitter.transform(some_text)).select('no_stopWords').take(1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Hashing trick"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(hashed=SparseVector(20, {2: 2.0, 3: 2.0, 4: 2.0, 5: 3.0, 8: 1.0, 9: 1.0, 15: 3.0, 16: 1.0, 18: 1.0, 19: 1.0}))]"
     ]
    }
   ],
   "source": [
    "hasher = feat.HashingTF(\n",
    "    inputCol=sw_remover.getOutputCol()\n",
    "    , outputCol='hashed'\n",
    "    , numFeatures=20\n",
    ")\n",
    "\n",
    "hasher.transform(sw_remover.transform(splitter.transform(some_text))).select('hashed').take(1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Term Frequency-Inverse Document Frequency"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(features=SparseVector(20, {2: 0.0, 3: 0.0, 4: 0.0, 5: 0.863, 8: 0.2877, 9: 0.0, 15: 0.0, 16: 0.6931, 18: 0.2877, 19: 0.0}))]"
     ]
    }
   ],
   "source": [
    "idf = feat.IDF(\n",
    "    inputCol=hasher.getOutputCol()\n",
    "    , outputCol='features'\n",
    ")\n",
    "\n",
    "idfModel = idf.fit(hasher.transform(sw_remover.transform(splitter.transform(some_text))))\n",
    "idfModel.transform(hasher.transform(sw_remover.transform(splitter.transform(some_text)))).select('features').take(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(text='\\n    Apache Spark achieves high performance for both batch\\n    and streaming data, using a state-of-the-art DAG scheduler, \\n    a query optimizer, and a physical execution engine.\\n    ', features=SparseVector(20, {2: 0.0, 3: 0.0, 4: 0.0, 5: 0.863, 8: 0.2877, 9: 0.0, 15: 0.0, 16: 0.6931, 18: 0.2877, 19: 0.0}))]"
     ]
    }
   ],
   "source": [
    "pipeline = Pipeline(stages=[splitter, sw_remover, hasher, idf])\n",
    "\n",
    "pipelineModel = pipeline.fit(some_text)\n",
    "pipelineModel.transform(some_text).select('text','features').take(1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Word-2-Vec model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(vector=DenseVector([0.0187, -0.0121, -0.0208, -0.0028, 0.002]))]"
     ]
    }
   ],
   "source": [
    "w2v = feat.Word2Vec(\n",
    "    vectorSize=5\n",
    "    , minCount=2\n",
    "    , inputCol=sw_remover.getOutputCol()\n",
    "    , outputCol='vector'\n",
    ")\n",
    "\n",
    "model=w2v.fit(sw_remover.transform(splitter.transform(some_text)))\n",
    "model.transform(sw_remover.transform(splitter.transform(some_text))).select('vector').take(1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Discretizing continuous variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "signal_df = spark.read.csv(\n",
    "    '../data/fourier_signal.csv'\n",
    "    , header=True\n",
    "    , inferSchema=True\n",
    ")\n",
    "\n",
    "steps = feat.QuantileDiscretizer(\n",
    "       numBuckets=10,\n",
    "       inputCol='signal',\n",
    "       outputCol='discretized')\n",
    "\n",
    "transformed = (\n",
    "    steps\n",
    "    .fit(signal_df)\n",
    "    .transform(signal_df)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 161,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "transformed_df = transformed.toPandas()\n",
    "\n",
    "fig, ax1 = plt.subplots()\n",
    "ax2 = ax1.twinx()\n",
    "\n",
    "ax1.plot(transformed_df['signal'], 'k')\n",
    "ax2.plot(transformed_df['discretized'], 'b-')\n",
    "\n",
    "ax1.set_ylabel('original', color='k')\n",
    "ax2.set_ylabel('discretized', color='b')\n",
    "\n",
    "ax1.set_ylim((-55, 35))\n",
    "ax2.set_ylim((-2, 12))\n",
    "fig.tight_layout()\n",
    "\n",
    "plt.savefig('discretized.png')\n",
    "\n",
    "plt.close('all')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Standardizing continuous variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------+--------------------+\n",
      "|summary|              signal|\n",
      "+-------+--------------------+\n",
      "|  count|                 256|\n",
      "|   mean|-5.59448321002520...|\n",
      "| stddev|   8.056325329550202|\n",
      "|    min|    -39.878842775021|\n",
      "|    max|  15.718058116309553|\n",
      "+-------+--------------------+"
     ]
    }
   ],
   "source": [
    "signal_df.describe().show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Row(signal=0.5233399378711634, signal_vec=DenseVector([0.5233]), signal_norm=DenseVector([0.065]))]"
     ]
    }
   ],
   "source": [
    "from pyspark.ml import Pipeline\n",
    "vec = feat.VectorAssembler(\n",
    "    inputCols=['signal']\n",
    "    , outputCol='signal_vec'\n",
    ")\n",
    "\n",
    "norm = feat.StandardScaler(\n",
    "    inputCol=vec.getOutputCol()\n",
    "    , outputCol='signal_norm'\n",
    "    , withMean=True\n",
    "    , withStd=True\n",
    ")\n",
    "\n",
    "norm_pipeline = Pipeline(stages=[vec, norm])\n",
    "signal_norm = (\n",
    "    norm_pipeline\n",
    "    .fit(signal_df)\n",
    "    .transform(signal_df)\n",
    ")\n",
    "\n",
    "signal_norm.take(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 173,
   "metadata": {},
   "outputs": [],
   "source": [
    "normalized_df = signal_norm.toPandas()\n",
    "normalized_df['normalized'] = normalized_df.apply(lambda row: row[2][0], axis=1)\n",
    "\n",
    "fig, ax1 = plt.subplots()\n",
    "ax2 = ax1.twinx()\n",
    "\n",
    "ax1.plot(normalized_df['signal'], 'k')\n",
    "ax2.plot(normalized_df['normalized'], 'b-')\n",
    "\n",
    "ax1.set_ylabel('original', color='k')\n",
    "ax2.set_ylabel('discretized', color='b')\n",
    "\n",
    "ax1.set_ylim((-105, 30))\n",
    "ax2.set_ylim((-6, 12))\n",
    "fig.tight_layout()\n",
    "\n",
    "plt.savefig('normalized.png')\n",
    "\n",
    "plt.close('all')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Topic mining"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "articles = spark.createDataFrame([\n",
    "    ('''\n",
    "        The Andromeda Galaxy, named after the mythological \n",
    "        Princess Andromeda, also known as Messier 31, M31, \n",
    "        or NGC 224, is a spiral galaxy approximately 780 \n",
    "        kiloparsecs (2.5 million light-years) from Earth, \n",
    "        and the nearest major galaxy to the Milky Way. \n",
    "        Its name stems from the area of the sky in which it \n",
    "        appears, the constellation of Andromeda. The 2006 \n",
    "        observations by the Spitzer Space Telescope revealed \n",
    "        that the Andromeda Galaxy contains approximately one \n",
    "        trillion stars, more than twice the number of the \n",
    "        Milky Way’s estimated 200-400 billion stars. The \n",
    "        Andromeda Galaxy, spanning approximately 220,000 light \n",
    "        years, is the largest galaxy in our Local Group, \n",
    "        which is also home to the Triangulum Galaxy and \n",
    "        other minor galaxies. The Andromeda Galaxy's mass is \n",
    "        estimated to be around 1.76 times that of the Milky \n",
    "        Way Galaxy (~0.8-1.5×1012 solar masses vs the Milky \n",
    "        Way's 8.5×1011 solar masses).\n",
    "    ''','Galaxy', 'Andromeda')\n",
    "    , ('''\n",
    "        The Milky Way is the galaxy that contains our Solar \n",
    "        System. The descriptive \"milky\" is derived from the \n",
    "        appearance from Earth of the galaxy – a band of light \n",
    "        seen in the night sky formed from stars that cannot be \n",
    "        individually distinguished by the naked eye. The term \n",
    "        Milky Way is a translation of the Latin via lactea, from \n",
    "        the Greek. From Earth, the Milky Way appears as a band \n",
    "        because its disk-shaped structure is viewed from within. \n",
    "        Galileo Galilei first resolved the band of light into \n",
    "        individual stars with his telescope in 1610. Observations \n",
    "        by Edwin Hubble showed that the Milky \n",
    "        Way is just one of many galaxies.\n",
    "    ''','Galaxy','Milky Way')\n",
    "    , ('''\n",
    "        Australia, officially the Commonwealth of Australia, \n",
    "        is a sovereign country comprising the mainland of the \n",
    "        Australian continent, the island of Tasmania and numerous \n",
    "        smaller islands. It is the largest country in Oceania and \n",
    "        the world's sixth-largest country by total area. The \n",
    "        neighbouring countries are Papua New Guinea, Indonesia and \n",
    "        East Timor to the north; the Solomon Islands and Vanuatu to \n",
    "        the north-east; and New Zealand to the south-east. Australia's \n",
    "        capital is Canberra, and its largest city is Sydney.\n",
    "    ''','Geography', 'Australia')\n",
    "    , ('''\n",
    "        The United States of America (USA), commonly known as the United \n",
    "        States (U.S.) or America, is a federal republic composed of 50 \n",
    "        states, a federal district, five major self-governing territories, \n",
    "        and various possessions. At 3.8 million square miles (9.8 million \n",
    "        km2) and with over 325 million people, the United States is the \n",
    "        world's third- or fourth-largest country by total area and the \n",
    "        third-most populous country. The capital is Washington, D.C., and \n",
    "        the largest city by population is New York City. Forty-eight states \n",
    "        and the capital's federal district are contiguous and in North America \n",
    "        between Canada and Mexico. The State of Alaska is in the northwest \n",
    "        corner of North America, bordered by Canada to the east and across \n",
    "        the Bering Strait from Russia to the west. The State of Hawaii is \n",
    "        an archipelago in the mid-Pacific Ocean. The U.S. territories are \n",
    "        scattered about the Pacific Ocean and the Caribbean Sea, stretching \n",
    "        across nine official time zones. The extremely diverse geography, \n",
    "        climate, and wildlife of the United States make it one of the world's \n",
    "        17 megadiverse countries.\n",
    "    ''','Geography', 'USA')\n",
    "    , ('''\n",
    "        China, officially the People's Republic of China (PRC), is a unitary \n",
    "        sovereign state in East Asia and, with a population of around 1.404 \n",
    "        billion, the world's most populous country. Covering 9,600,000 \n",
    "        square kilometers (3,700,000 sq mi), China has the most borders of \n",
    "        any country in the world. Governed by the Communist Party of China, \n",
    "        it exercises jurisdiction over 22 provinces, five autonomous regions, \n",
    "        four direct-controlled municipalities (Beijing, Tianjin, Shanghai, and \n",
    "        Chongqing), and the special administrative regions of Hong Kong and Macau.\n",
    "    ''','Geography', 'China')\n",
    "    , ('''\n",
    "        Poland, officially the Republic of Poland, is a country located in \n",
    "        Central Europe. It is divided into 16 administrative subdivisions, \n",
    "        covering an area of 312,679 square kilometres (120,726 sq mi), and has \n",
    "        a largely temperate seasonal climate. With a population of approximately \n",
    "        38.5 million people, Poland is the sixth most populous member state of \n",
    "        the European Union. Poland's capital and largest metropolis is \n",
    "        Warsaw.\n",
    "    ''','Geography', 'Poland')\n",
    "    , ('''\n",
    "        The domestic dog (Canis lupus familiaris when considered a subspecies \n",
    "        of the gray wolf or Canis familiaris when considered a distinct species) \n",
    "        is a member of the genus Canis (canines), which forms part of the \n",
    "        wolf-like canids, and is the most widely abundant terrestrial carnivore.\n",
    "        The dog and the extant gray wolf are sister taxa as modern wolves are \n",
    "        not closely related to the wolves that were first domesticated, which \n",
    "        implies that the direct ancestor of the dog is extinct. The dog was \n",
    "        the first species to be domesticated and has been selectively bred over \n",
    "        millennia for various behaviors, sensory capabilities, and physical attributes.\n",
    "    ''','Animal', 'Dog')\n",
    "    , ('''\n",
    "        The origin of the domestic dog is not clear. It is known that the dog was \n",
    "        the first domesticated species. The domestic dog is a member of the genus \n",
    "        Canis (canines), which forms part of the wolf-like canids, and is the most \n",
    "        widely abundant terrestrial carnivore. The closest living relative of the \n",
    "        dog is the gray wolf and there is no evidence of any other canine \n",
    "        contributing to its genetic lineage. The dog and the extant gray wolf \n",
    "        form two sister clades, with modern wolves not closely related to the \n",
    "        wolves that were first domesticated. The archaeological record shows \n",
    "        the first undisputed dog remains buried beside humans 14,700 years ago, \n",
    "        with disputed remains occurring 36,000 years ago. These dates imply \n",
    "        that the earliest dogs arose in the time of human hunter-gatherers \n",
    "        and not agriculturists.\n",
    "    ''','Animal', 'Dog')\n",
    "    , ('''\n",
    "        Washington, officially the State of Washington, is a state in the Pacific \n",
    "        Northwest region of the United States. Named after George Washington, \n",
    "        the first president of the United States, the state was made out of the \n",
    "        western part of the Washington Territory, which was ceded by Britain in \n",
    "        1846 in accordance with the Oregon Treaty in the settlement of the \n",
    "        Oregon boundary dispute. It was admitted to the Union as the 42nd state \n",
    "        in 1889. Olympia is the state capital. Washington is sometimes referred \n",
    "        to as Washington State, to distinguish it from Washington, D.C., the \n",
    "        capital of the United States, which is often shortened to Washington.\n",
    "    ''','Geography', 'Washington State')    \n",
    "], ['articles', 'Topic', 'Object'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Galaxy Andromeda 2 [0.003053456550444906,0.0033317477861422363,0.9936147956634129]\n",
      "Galaxy Milky Way 2 [0.004752646858051239,0.0050467276024757125,0.9902006255394731]\n",
      "Geography Australia 1 [0.00632938201257351,0.9877519489900843,0.005918668997342191]\n",
      "Geography USA 1 [0.002525770470526258,0.9951088020926291,0.002365427436844653]\n",
      "Geography China 1 [0.0051541381704948135,0.6008937537867546,0.3939521080427506]\n",
      "Geography Poland 1 [0.006814345676648856,0.986849415140345,0.006336239183006135]\n",
      "Animal Dog 0 [0.9901640623662747,0.005226762717124236,0.004609174916600995]\n",
      "Animal Dog 0 [0.9926300349445092,0.003938103061207765,0.0034318619942831073]\n",
      "Geography Washington State 1 [0.005261811808175384,0.9898606664191076,0.004877521772717041]"
     ]
    }
   ],
   "source": [
    "import pyspark.ml.clustering as clust\n",
    "\n",
    "splitter = feat.RegexTokenizer(\n",
    "    inputCol='articles'\n",
    "    , outputCol='articles_split'\n",
    "    , pattern='\\s+|[,.\\\"]'\n",
    ")\n",
    "\n",
    "sw_remover = feat.StopWordsRemover(\n",
    "    inputCol=splitter.getOutputCol()\n",
    "    , outputCol='no_stopWords'\n",
    ")\n",
    "\n",
    "count_vec = feat.CountVectorizer(\n",
    "    inputCol=sw_remover.getOutputCol()\n",
    "    , outputCol='vector'\n",
    ")\n",
    "\n",
    "lda_clusters = clust.LDA(\n",
    "    k=3\n",
    "    , optimizer='online'\n",
    "    , featuresCol=count_vec.getOutputCol()\n",
    ")\n",
    "\n",
    "topic_pipeline = Pipeline(\n",
    "    stages=[\n",
    "        splitter\n",
    "        , sw_remover\n",
    "        , count_vec\n",
    "        , lda_clusters\n",
    "    ]\n",
    ")\n",
    "\n",
    "for topic in ( \n",
    "        topic_pipeline\n",
    "        .fit(articles)\n",
    "        .transform(articles)\n",
    "        .select('Topic','Object','topicDistribution')\n",
    "        .take(10)\n",
    "):\n",
    "    print(\n",
    "        topic.Topic\n",
    "        , topic.Object\n",
    "        , np.argmax(topic.topicDistribution)\n",
    "        , topic.topicDistribution\n",
    "    )\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PySpark",
   "language": "",
   "name": "pysparkkernel"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "python",
    "version": 2
   },
   "mimetype": "text/x-python",
   "name": "pyspark",
   "pygments_lexer": "python2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
