{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PySpark Cookbook\n",
    "\n",
    "### Tomasz Drabas, Denny Lee\n",
    "#### Version: 0.1\n",
    "#### Date: 2/28/2018"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Loading the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "32561"
     ]
    }
   ],
   "source": [
    "import pyspark.sql.functions as func\n",
    "census_path = '../data/census_income.csv'\n",
    "\n",
    "census = spark.read.csv(\n",
    "    census_path\n",
    "    , header=True\n",
    "    , inferSchema=True\n",
    ")\n",
    "\n",
    "for col, typ in census.dtypes:\n",
    "    if typ == 'string':\n",
    "        census = census.withColumn(\n",
    "            col\n",
    "            , func.ltrim(func.rtrim(census[col]))\n",
    "        )\n",
    "census.count()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+----------------+------+------------+-------------+--------------------+-----------------+-------------+------------------+------+------------+------------+--------------+--------------+-----+\n",
      "|age|       workclass|fnlwgt|   education|education-num|      marital-status|       occupation| relationship|              race|   sex|capital-gain|capital-loss|hours-per-week|native-country|label|\n",
      "+---+----------------+------+------------+-------------+--------------------+-----------------+-------------+------------------+------+------------+------------+--------------+--------------+-----+\n",
      "| 39|       State-gov| 77516|   Bachelors|           13|       Never-married|     Adm-clerical|Not-in-family|             White|  Male|        2174|           0|            40| United-States|<=50K|\n",
      "| 50|Self-emp-not-inc| 83311|   Bachelors|           13|  Married-civ-spouse|  Exec-managerial|      Husband|             White|  Male|           0|           0|            13| United-States|<=50K|\n",
      "| 38|         Private|215646|     HS-grad|            9|            Divorced|Handlers-cleaners|Not-in-family|             White|  Male|           0|           0|            40| United-States|<=50K|\n",
      "| 53|         Private|234721|        11th|            7|  Married-civ-spouse|Handlers-cleaners|      Husband|             Black|  Male|           0|           0|            40| United-States|<=50K|\n",
      "| 28|         Private|338409|   Bachelors|           13|  Married-civ-spouse|   Prof-specialty|         Wife|             Black|Female|           0|           0|            40|          Cuba|<=50K|\n",
      "| 37|         Private|284582|     Masters|           14|  Married-civ-spouse|  Exec-managerial|         Wife|             White|Female|           0|           0|            40| United-States|<=50K|\n",
      "| 49|         Private|160187|         9th|            5|Married-spouse-ab...|    Other-service|Not-in-family|             Black|Female|           0|           0|            16|       Jamaica|<=50K|\n",
      "| 52|Self-emp-not-inc|209642|     HS-grad|            9|  Married-civ-spouse|  Exec-managerial|      Husband|             White|  Male|           0|           0|            45| United-States| >50K|\n",
      "| 31|         Private| 45781|     Masters|           14|       Never-married|   Prof-specialty|Not-in-family|             White|Female|       14084|           0|            50| United-States| >50K|\n",
      "| 42|         Private|159449|   Bachelors|           13|  Married-civ-spouse|  Exec-managerial|      Husband|             White|  Male|        5178|           0|            40| United-States| >50K|\n",
      "| 37|         Private|280464|Some-college|           10|  Married-civ-spouse|  Exec-managerial|      Husband|             Black|  Male|           0|           0|            80| United-States| >50K|\n",
      "| 30|       State-gov|141297|   Bachelors|           13|  Married-civ-spouse|   Prof-specialty|      Husband|Asian-Pac-Islander|  Male|           0|           0|            40|         India| >50K|\n",
      "| 23|         Private|122272|   Bachelors|           13|       Never-married|     Adm-clerical|    Own-child|             White|Female|           0|           0|            30| United-States|<=50K|\n",
      "| 32|         Private|205019|  Assoc-acdm|           12|       Never-married|            Sales|Not-in-family|             Black|  Male|           0|           0|            50| United-States|<=50K|\n",
      "| 40|         Private|121772|   Assoc-voc|           11|  Married-civ-spouse|     Craft-repair|      Husband|Asian-Pac-Islander|  Male|           0|           0|            40|             ?| >50K|\n",
      "| 34|         Private|245487|     7th-8th|            4|  Married-civ-spouse| Transport-moving|      Husband|Amer-Indian-Eskimo|  Male|           0|           0|            45|        Mexico|<=50K|\n",
      "| 25|Self-emp-not-inc|176756|     HS-grad|            9|       Never-married|  Farming-fishing|    Own-child|             White|  Male|           0|           0|            35| United-States|<=50K|\n",
      "| 32|         Private|186824|     HS-grad|            9|       Never-married|Machine-op-inspct|    Unmarried|             White|  Male|           0|           0|            40| United-States|<=50K|\n",
      "| 38|         Private| 28887|        11th|            7|  Married-civ-spouse|            Sales|      Husband|             White|  Male|           0|           0|            50| United-States|<=50K|\n",
      "| 43|Self-emp-not-inc|292175|     Masters|           14|            Divorced|  Exec-managerial|    Unmarried|             White|Female|           0|           0|            45| United-States| >50K|\n",
      "+---+----------------+------+------------+-------------+--------------------+-----------------+-------------+------------------+------+------------+------------+--------------+--------------+-----+\n",
      "only showing top 20 rows"
     ]
    }
   ],
   "source": [
    "census.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- age: integer (nullable = true)\n",
      " |-- workclass: string (nullable = true)\n",
      " |-- fnlwgt: integer (nullable = true)\n",
      " |-- education: string (nullable = true)\n",
      " |-- education-num: integer (nullable = true)\n",
      " |-- marital-status: string (nullable = true)\n",
      " |-- occupation: string (nullable = true)\n",
      " |-- relationship: string (nullable = true)\n",
      " |-- race: string (nullable = true)\n",
      " |-- sex: string (nullable = true)\n",
      " |-- capital-gain: integer (nullable = true)\n",
      " |-- capital-loss: integer (nullable = true)\n",
      " |-- hours-per-week: integer (nullable = true)\n",
      " |-- native-country: string (nullable = true)\n",
      " |-- label: string (nullable = true)"
     ]
    }
   ],
   "source": [
    "census.printSchema()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Exploring the data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data prep"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "List of columns to keep"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['label', 'age', 'capital-gain', 'capital-loss', 'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']"
     ]
    }
   ],
   "source": [
    "cols_to_keep = census.dtypes\n",
    "\n",
    "cols_to_keep = (\n",
    "    ['label','age'\n",
    "     ,'capital-gain'\n",
    "     ,'capital-loss'\n",
    "     ,'hours-per-week'\n",
    "    ] + [\n",
    "        e[0] for e in cols_to_keep[:-1] \n",
    "        if e[1] == 'string'\n",
    "    ]\n",
    ")\n",
    "\n",
    "cols_to_keep"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Get numeric and categorical columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(['age', 'capital-gain', 'capital-loss', 'hours-per-week'], ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country'])"
     ]
    }
   ],
   "source": [
    "import pyspark.mllib.stat as st\n",
    "import numpy as np\n",
    "\n",
    "census_subset = census.select(cols_to_keep)\n",
    "\n",
    "cols_num = [\n",
    "    e[0] for e in census_subset.dtypes \n",
    "    if e[1] == 'int'\n",
    "]\n",
    "cols_cat = [\n",
    "    e[0] for e in census_subset.dtypes[1:] \n",
    "    if e[1] == 'string'\n",
    "]\n",
    "cols_num, cols_cat"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Numerical data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "age: min->17.0, mean->38.6, max->90.0, stdev->13.6\n",
      "capital-gain: min->0.0, mean->1077.6, max->99999.0, stdev->7385.3\n",
      "capital-loss: min->0.0, mean->87.3, max->4356.0, stdev->403.0\n",
      "hours-per-week: min->1.0, mean->40.4, max->99.0, stdev->12.3"
     ]
    }
   ],
   "source": [
    "rdd_num = (\n",
    "    census_subset\n",
    "    .select(cols_num)\n",
    "    .rdd\n",
    "    .map(lambda row: [e for e in row])\n",
    ")\n",
    "\n",
    "stats_num = st.Statistics.colStats(rdd_num)\n",
    "\n",
    "for col, min_, mean_, max_, var_ in zip(\n",
    "      cols_num\n",
    "    , stats_num.min()\n",
    "    , stats_num.mean()\n",
    "    , stats_num.max()\n",
    "    , stats_num.variance()\n",
    "):\n",
    "    print('{0}: min->{1:.1f}, mean->{2:.1f}, max->{3:.1f}, stdev->{4:.1f}'\n",
    "          .format(col, min_, mean_, max_, np.sqrt(var_)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Categorical data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sex [('Male', 21790), ('Female', 10771)] \n",
      "\n",
      "race [('White', 27816), ('Black', 3124), ('Asian-Pac-Islander', 1039), ('Amer-Indian-Eskimo', 311), ('Other', 271)] \n",
      "\n",
      "label [('<=50K', 24720), ('>50K', 7841)] \n",
      "\n",
      "native-country [('United-States', 29170), ('Mexico', 643), ('?', 583), ('Philippines', 198), ('Germany', 137), ('Canada', 121), ('Puerto-Rico', 114), ('El-Salvador', 106), ('India', 100), ('Cuba', 95), ('England', 90), ('Jamaica', 81), ('South', 80), ('China', 75), ('Italy', 73), ('Dominican-Republic', 70), ('Vietnam', 67), ('Guatemala', 64), ('Japan', 62), ('Poland', 60), ('Columbia', 59), ('Taiwan', 51), ('Haiti', 44), ('Iran', 43), ('Portugal', 37), ('Nicaragua', 34), ('Peru', 31), ('France', 29), ('Greece', 29), ('Ecuador', 28), ('Ireland', 24), ('Hong', 20), ('Trinadad&Tobago', 19), ('Cambodia', 19), ('Laos', 18), ('Thailand', 18), ('Yugoslavia', 16), ('Outlying-US(Guam-USVI-etc)', 14), ('Hungary', 13), ('Honduras', 13), ('Scotland', 12), ('Holand-Netherlands', 1)] \n",
      "\n",
      "marital-status [('Married-civ-spouse', 14976), ('Never-married', 10683), ('Divorced', 4443), ('Separated', 1025), ('Widowed', 993), ('Married-spouse-absent', 418), ('Married-AF-spouse', 23)] \n",
      "\n",
      "workclass [('Private', 22696), ('Self-emp-not-inc', 2541), ('Local-gov', 2093), ('?', 1836), ('State-gov', 1298), ('Self-emp-inc', 1116), ('Federal-gov', 960), ('Without-pay', 14), ('Never-worked', 7)] \n",
      "\n",
      "education [('HS-grad', 10501), ('Some-college', 7291), ('Bachelors', 5355), ('Masters', 1723), ('Assoc-voc', 1382), ('11th', 1175), ('Assoc-acdm', 1067), ('10th', 933), ('7th-8th', 646), ('Prof-school', 576), ('9th', 514), ('12th', 433), ('Doctorate', 413), ('5th-6th', 333), ('1st-4th', 168), ('Preschool', 51)] \n",
      "\n",
      "occupation [('Prof-specialty', 4140), ('Craft-repair', 4099), ('Exec-managerial', 4066), ('Adm-clerical', 3770), ('Sales', 3650), ('Other-service', 3295), ('Machine-op-inspct', 2002), ('?', 1843), ('Transport-moving', 1597), ('Handlers-cleaners', 1370), ('Farming-fishing', 994), ('Tech-support', 928), ('Protective-serv', 649), ('Priv-house-serv', 149), ('Armed-Forces', 9)] \n",
      "\n",
      "relationship [('Husband', 13193), ('Not-in-family', 8305), ('Own-child', 5068), ('Unmarried', 3446), ('Wife', 1568), ('Other-relative', 981)]"
     ]
    }
   ],
   "source": [
    "rdd_cat = (\n",
    "    census_subset\n",
    "    .select(cols_cat + ['label'])\n",
    "    .rdd\n",
    "    .map(lambda row: [e for e in row])\n",
    ")\n",
    "\n",
    "results_cat = {}\n",
    "\n",
    "for i, col in enumerate(cols_cat + ['label']):\n",
    "    results_cat[col] = (\n",
    "        rdd_cat\n",
    "        .groupBy(lambda row: row[i])\n",
    "        .map(lambda el: (el[0], len(el[1])))\n",
    "        .collect()\n",
    "    )\n",
    "\n",
    "for k in results_cat:\n",
    "    print(\n",
    "        k\n",
    "        , sorted(\n",
    "            results_cat[k]\n",
    "            , key=lambda el: el[1]\n",
    "            , reverse=True)\n",
    "        , '\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Correlations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "array([[ 1.        ,  0.0776745 ,  0.05777454,  0.06875571],\n",
      "       [ 0.0776745 ,  1.        , -0.03161506,  0.07840862],\n",
      "       [ 0.05777454, -0.03161506,  1.        ,  0.05425636],\n",
      "       [ 0.06875571,  0.07840862,  0.05425636,  1.        ]])"
     ]
    }
   ],
   "source": [
    "correlations = st.Statistics.corr(rdd_num)\n",
    "correlations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "age\n",
      "     capital-gain 0.077674498166\n",
      "     capital-loss 0.057774539479\n",
      "     hours-per-week 0.0687557075095\n",
      "\n",
      "capital-gain\n",
      "     age 0.077674498166\n",
      "     hours-per-week 0.0784086153901\n",
      "\n",
      "capital-loss\n",
      "     age 0.057774539479\n",
      "     hours-per-week 0.0542563622727\n",
      "\n",
      "hours-per-week\n",
      "     age 0.0687557075095\n",
      "     capital-gain 0.0784086153901\n",
      "     capital-loss 0.0542563622727"
     ]
    }
   ],
   "source": [
    "for i, el_i in enumerate(abs(correlations) > 0.05):\n",
    "    print(cols_num[i])\n",
    "    \n",
    "    for j, el_j in enumerate(el_i):\n",
    "        if el_j and j != i:\n",
    "            print(\n",
    "                '    '\n",
    "                , cols_num[j]\n",
    "                , correlations[i][j]\n",
    "            )\n",
    "            \n",
    "    print()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Statistical testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.0\n",
      "the occurrence of the outcomes is statistically independent."
     ]
    }
   ],
   "source": [
    "import pyspark.mllib.linalg as ln\n",
    "\n",
    "census_occupation = (\n",
    "    census\n",
    "    .groupby('occupation')\n",
    "    .pivot('label')\n",
    "    .count()\n",
    ")\n",
    "\n",
    "census_occupation_coll = (\n",
    "    census_occupation\n",
    "    .rdd\n",
    "    .map(lambda row: (row[1:]))\n",
    "    .flatMap(lambda row: row)\n",
    "    .collect()\n",
    ")\n",
    "\n",
    "len_row = len(census_occupation.collect())\n",
    "dense_mat = ln.DenseMatrix(\n",
    "    len_row\n",
    "    , 2\n",
    "    , census_occupation_coll\n",
    "    , True\n",
    ")\n",
    "\n",
    "chi_sq = st.Statistics.chiSqTest(dense_mat)\n",
    "\n",
    "print(chi_sq.pValue)\n",
    "print(chi_sq.nullHypothesis)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "array([[  2.66700000e+03,   9.83000000e+02],\n",
      "       [  2.09800000e+03,   1.96800000e+03],\n",
      "       [  2.28100000e+03,   1.85900000e+03],\n",
      "       [  1.28400000e+03,   8.60000000e+01],\n",
      "       [  8.79000000e+02,   1.15000000e+02],\n",
      "       [  3.17000000e+03,   9.29000000e+02],\n",
      "       [  1.27700000e+03,   3.20000000e+02],\n",
      "       [  1.48000000e+02,   1.00000000e+00],\n",
      "       [  4.38000000e+02,   2.11000000e+02],\n",
      "       [  3.15800000e+03,   1.37000000e+02],\n",
      "       [  6.45000000e+02,   2.83000000e+02],\n",
      "       [  1.75200000e+03,   2.50000000e+02],\n",
      "       [  8.00000000e+00,   1.00000000e+00],\n",
      "       [  1.65200000e+03,   1.91000000e+02],\n",
      "       [  3.26300000e+03,   5.07000000e+02]])"
     ]
    }
   ],
   "source": [
    "dense_mat.toArray()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Transforming the data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Number of distinct values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'sex': 2, 'race': 5, 'native-country': 42, 'marital-status': 7, 'workclass': 9, 'education': 16, 'occupation': 15, 'relationship': 6}"
     ]
    }
   ],
   "source": [
    "len_ftrs = []\n",
    "\n",
    "for col in cols_cat:\n",
    "    (\n",
    "        len_ftrs\n",
    "        .append(\n",
    "            (col\n",
    "             , census\n",
    "                 .select(col)\n",
    "                 .distinct()\n",
    "                 .count()\n",
    "            )\n",
    "        )\n",
    "    )\n",
    "    \n",
    "len_ftrs = dict(len_ftrs)\n",
    "len_ftrs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Using the hashing trick"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[['<=50K'], [39], [2174], [0], [40], [1.0, 2.0, 1.0, 5.0], [3.0, 3.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0], [2.0, 3.0, 8.0], [0.0, 3.0, 3.0, 1.0, 4.0, 1.0, 0.0], [5.0, 5.0, 3.0], [3.0, 2.0], [4.0], [1.0, 0.0, 0.0, 3.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.0]], [['<=50K'], [50], [0], [0], [13], [4.0, 3.0, 1.0, 8.0], [3.0, 3.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0], [5.0, 5.0, 8.0], [0.0, 1.0, 2.0, 2.0, 8.0, 1.0, 1.0], [4.0, 2.0, 1.0], [3.0, 2.0], [4.0], [1.0, 0.0, 0.0, 3.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.0]], [['<=50K'], [38], [0], [0], [40], [2.0, 2.0, 0.0, 3.0], [2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 1.0], [3.0, 2.0, 3.0], [2.0, 3.0, 1.0, 3.0, 7.0, 0.0, 1.0], [5.0, 5.0, 3.0], [3.0, 2.0], [4.0], [1.0, 0.0, 0.0, 3.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.0]]]"
     ]
    }
   ],
   "source": [
    "import pyspark.mllib.feature as feat\n",
    "\n",
    "final_data = (\n",
    "    census\n",
    "    .select(cols_to_keep)\n",
    "    .rdd\n",
    "    .map(lambda row: [\n",
    "        list(\n",
    "            feat.HashingTF(int(len_ftrs[col] / 2.0))\n",
    "            .transform(row[i])\n",
    "            .toArray()\n",
    "        ) if i >= 5\n",
    "        else [row[i]] \n",
    "        for i, col in enumerate(cols_to_keep)]\n",
    "    )\n",
    ")\n",
    "\n",
    "final_data.take(3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Encode the label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def labelEncode(label):\n",
    "    return [int(label[0] == '>50K')]\n",
    "\n",
    "final_data = (\n",
    "    final_data\n",
    "    .map(lambda row: labelEncode(row[0]) \n",
    "         + [item \n",
    "            for sublist in row[1:] \n",
    "            for item in sublist]\n",
    "        )\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Standardizing data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(0, DenseVector([0.0307, 0.1485, -0.2167, -0.0354, -1.2635, 0.008, 1.7796, 1.0001, 0.83, 0.5743, -0.3473, -0.443, 0.6826, -0.4007, -0.3862, -0.4685, -1.1369, -0.4555, 0.4551, -1.1329, 2.0776, 1.8713, -1.0381, -0.3381, -0.2381, -0.775, 0.9805, 1.5207, 0.3083, -0.0634, -0.2574, -0.7031, 0.3208, -0.0901, -0.1263, 0.3355, -0.1223, 0.3378, -0.0853, -0.0937, -0.0887, -0.2104, 0.0, 0.1286, -0.1976, -0.1433, -0.1419, 0.1895, 0.298, 0.2896, 0.1638, 0.1221, 0.0])), (0, DenseVector([-0.0426, -0.1459, -0.2167, -0.0354, -0.2368, 0.008, -0.5593, -0.2503, -0.2076, -0.1937, -0.3473, -0.443, -0.9076, -0.4007, 1.1933, -0.4685, -0.4505, -1.1655, -2.1192, 0.935, 2.0776, -0.5029, 0.383, 1.0477, -1.306, 1.0328, 0.9805, 1.5207, 0.3083, -0.0634, -0.2574, -0.7031, 0.3208, -0.0901, -0.1263, 0.3355, -0.1223, 0.3378, -0.0853, -0.0937, -0.0887, -0.2104, 0.0, 0.1286, -0.1976, -0.1433, -0.1419, 0.1895, 0.298, 0.2896, 0.1638, 0.1221, 0.0])), (0, DenseVector([-0.7758, -0.1459, -0.2167, -0.0354, -0.2368, 0.008, -0.5593, -0.2503, 0.83, 0.5743, -0.3473, -0.443, 0.6826, -0.4007, -0.3862, -0.4685, 0.9222, 0.9646, 0.4551, 1.969, 0.1557, -0.5029, 0.383, -0.8, 0.8297, 1.0328, -3.1763, -1.2787, 0.3083, -1.3113, 0.2698, 1.4223, -3.115, 11.1029, -0.1263, -2.0213, -0.1223, -2.9329, -0.0853, -0.0937, -0.0887, 4.6556, 0.0, -2.0726, -0.1976, -0.1433, -0.1419, -3.218, -3.15, -3.6612, -4.1258, -3.2729, 0.0]))]"
     ]
    }
   ],
   "source": [
    "standardizer = feat.StandardScaler(True, True)\n",
    "sModel = standardizer.fit(final_data.map(lambda row: row[1:]))\n",
    "final_data_scaled = sModel.transform(final_data.map(lambda row: row[1:]))\n",
    "\n",
    "final_data = (\n",
    "    final_data\n",
    "    .map(lambda row: row[0])\n",
    "    .zipWithIndex()\n",
    "    .map(lambda row: (row[1], row[0]))\n",
    "    .join(\n",
    "        final_data_scaled\n",
    "        .zipWithIndex()\n",
    "        .map(lambda row: (row[1], row[0]))\n",
    "    )\n",
    "    .map(lambda row: row[1])\n",
    ")\n",
    "\n",
    "final_data.take(3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(0, DenseVector([0.0307, 0.1485, -0.2167, -0.0354, -1.2635, 0.008, 1.7796, 1.0001, 0.83, 0.5743, -0.3473, -0.443, 0.6826, -0.4007, -0.3862, -0.4685, -1.1369, -0.4555, 0.4551, -1.1329, 2.0776, 1.8713, -1.0381, -0.3381, -0.2381, -0.775, 0.9805, 1.5207, 0.3083, -0.0634, -0.2574, -0.7031, 0.3208, -0.0901, -0.1263, 0.3355, -0.1223, 0.3378, -0.0853, -0.0937, -0.0887, -0.2104, 0.0, 0.1286, -0.1976, -0.1433, -0.1419, 0.1895, 0.298, 0.2896, 0.1638, 0.1221, 0.0]))]"
     ]
    }
   ],
   "source": [
    "final_data.take(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(DenseVector([38.5816, 1077.6488, 87.3038, 40.4375, 2.2307, 1.9942, 0.2391, 3.4004, 2.2001, 2.2522, 0.1836, 0.2528, 0.5707, 0.1384, 1.2445, 1.5914, 3.6564, 3.6415, 7.1161, 1.0957, 0.838, 1.4236, 2.461, 4.7319, 1.223, 0.4287, 3.8206, 2.8271, 2.472, 3.0508, 2.4882, 4.6616, 0.9066, 0.008, 0.0157, 2.7153, 0.0157, 0.8967, 0.0072, 0.0108, 0.0078, 0.0432, 0.0, 1.9416, 0.0445, 0.0238, 0.0197, 0.9444, 0.9136, 1.8534, 0.9618, 0.964, 0.0]), DenseVector([13.6404, 7385.2921, 402.9602, 12.3474, 0.974, 0.722, 0.4276, 1.5994, 0.9637, 1.3021, 0.5285, 0.5706, 0.6288, 0.3453, 0.6331, 1.2623, 1.457, 1.4084, 1.9423, 0.9671, 1.0407, 0.8424, 1.4074, 2.1649, 0.9365, 0.5532, 1.2029, 1.4289, 1.7124, 0.8013, 1.8969, 0.941, 0.2911, 0.0893, 0.1243, 0.8486, 0.1281, 0.3057, 0.0846, 0.1151, 0.088, 0.2055, 0.0, 0.4543, 0.2251, 0.1658, 0.1391, 0.2935, 0.29, 0.5062, 0.2331, 0.2945, 0.0]))"
     ]
    }
   ],
   "source": [
    "sModel.mean, sModel.std"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Creating an RDD for training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LabeledPoint(0.0, [0.03067008638,0.148450615588,-0.216656200028,-0.0354289029213,-1.26349550696,0.00799647373215,1.77958633578,1.0001208369,0.830011803082,0.574318091225,-0.347301166352,-0.442992960562,0.682621408268,-0.400707851562,-0.386227594906,-0.46854980554,-1.13687358292,-0.455511851733,0.455078852767,-1.13291606435,2.07757549683,1.87129751533,-1.03808682171,-0.338080942511,-0.238127083048,-0.775013916071,0.980455418851,1.52071905613,0.308340936417,-0.0633904612925,-0.257367850576,-0.703060548727,0.320779847155,-0.0900636281825,-0.126266971505,0.335477418132,-0.122303578935,0.337810645307,-0.085261168002,-0.0936707494514,-0.0886669711795,-0.210415412747,0.0,0.12864878811,-0.197570816433,-0.143327008532,-0.141932215584,0.189518952773,0.297980479635,0.289626591195,0.16375112207,0.122095826183,0.0]), LabeledPoint(0.0, [-0.042641371748,-0.145918242817,-0.216656200028,-0.0354289029213,-0.236826580974,0.00799647373215,-0.559267778422,-0.250332636089,-0.207614485797,-0.193695846447,-0.347301166352,-0.442992960562,-0.907622254507,-0.400707851562,1.19326960652,-0.46854980554,-0.450520990243,-1.16554686558,-2.11924750668,0.93502013944,2.07757549683,-0.50285036975,0.382965267827,1.04765087636,-1.30597668381,1.03279668076,0.980455418851,1.52071905613,0.308340936417,-0.0633904612925,-0.257367850576,-0.703060548727,0.320779847155,-0.0900636281825,-0.126266971505,0.335477418132,-0.122303578935,0.337810645307,-0.085261168002,-0.0936707494514,-0.0886669711795,-0.210415412747,0.0,0.12864878811,-0.197570816433,-0.143327008532,-0.141932215584,0.189518952773,0.297980479635,0.289626591195,0.16375112207,0.122095826183,0.0])]"
     ]
    }
   ],
   "source": [
    "import pyspark.mllib.regression as reg\n",
    "\n",
    "final_data_income = (\n",
    "    final_data\n",
    "    .map(lambda row: reg.LabeledPoint(\n",
    "        row[0]\n",
    "        , row[1]\n",
    "        )\n",
    "    )\n",
    ")\n",
    "final_data_income.take(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LabeledPoint(40.0, [0.0,0.03067008638,0.148450615588,-0.216656200028,-1.26349550696,0.00799647373215,1.77958633578,1.0001208369,0.830011803082,0.574318091225,-0.347301166352,-0.442992960562,0.682621408268,-0.400707851562,-0.386227594906,-0.46854980554,-1.13687358292,-0.455511851733,0.455078852767,-1.13291606435,2.07757549683,1.87129751533,-1.03808682171,-0.338080942511,-0.238127083048,-0.775013916071,0.980455418851,1.52071905613,0.308340936417,-0.0633904612925,-0.257367850576,-0.703060548727,0.320779847155,-0.0900636281825,-0.126266971505,0.335477418132,-0.122303578935,0.337810645307,-0.085261168002,-0.0936707494514,-0.0886669711795,-0.210415412747,0.0,0.12864878811,-0.197570816433,-0.143327008532,-0.141932215584,0.189518952773,0.297980479635,0.289626591195,0.16375112207,0.122095826183,0.0]), LabeledPoint(40.0, [0.0,-0.042641371748,-0.145918242817,-0.216656200028,-0.236826580974,0.00799647373215,-0.559267778422,-0.250332636089,-0.207614485797,-0.193695846447,-0.347301166352,-0.442992960562,-0.907622254507,-0.400707851562,1.19326960652,-0.46854980554,-0.450520990243,-1.16554686558,-2.11924750668,0.93502013944,2.07757549683,-0.50285036975,0.382965267827,1.04765087636,-1.30597668381,1.03279668076,0.980455418851,1.52071905613,0.308340936417,-0.0633904612925,-0.257367850576,-0.703060548727,0.320779847155,-0.0900636281825,-0.126266971505,0.335477418132,-0.122303578935,0.337810645307,-0.085261168002,-0.0936707494514,-0.0886669711795,-0.210415412747,0.0,0.12864878811,-0.197570816433,-0.143327008532,-0.141932215584,0.189518952773,0.297980479635,0.289626591195,0.16375112207,0.122095826183,0.0])]"
     ]
    }
   ],
   "source": [
    "mu, std = sModel.mean[3], sModel.std[3]\n",
    "\n",
    "final_data_hours = (\n",
    "    final_data\n",
    "    .map(lambda row: reg.LabeledPoint(\n",
    "        row[1][3] * std + mu\n",
    "        , ln.Vectors.dense([row[0]] + list(row[1][0:3]) + list(row[1][4:]))\n",
    "        )\n",
    "    )\n",
    ")\n",
    "final_data_hours.take(2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Splitting into training and testing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
    "(\n",
    "    final_data_income_train\n",
    "    , final_data_income_test\n",
    ") = (\n",
    "    final_data_income.randomSplit([0.7, 0.3])\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "(\n",
    "    final_data_hours_train\n",
    "    , final_data_hours_test\n",
    ") = (\n",
    "    final_data_hours.randomSplit([0.7, 0.3])\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Predicting hours of work for census respondents"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Linear regression (benchmark)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/opt/spark/python/lib/pyspark.zip/pyspark/mllib/regression.py:281: UserWarning: Deprecated in 2.0.0. Use ml.regression.LinearRegression."
     ]
    }
   ],
   "source": [
    "workhours_model_lm = reg.LinearRegressionWithSGD.train(final_data_hours_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "50.0 45.9023510548\n",
      "30.0 0.314537517465\n",
      "50.0 -2.48376636865\n",
      "15.0 1.53025028946\n",
      "40.0 8.01134007899\n",
      "43.0 1.54798295675\n",
      "50.0 -7.27390674472\n",
      "38.0 -0.808751734677\n",
      "48.0 -1.42157623665\n",
      "40.0 -9.26148064215"
     ]
    }
   ],
   "source": [
    "small_sample_hours = sc.parallelize(final_data_hours_test.take(10))\n",
    "\n",
    "# pull true labels and model predictions to the driver side by side\n",
    "labels = small_sample_hours.map(lambda row: row.label).collect()\n",
    "predictions = workhours_model_lm.predict(\n",
    "    small_sample_hours.map(lambda row: row.features)\n",
    ").collect()\n",
    "\n",
    "for label, prediction in zip(labels, predictions):\n",
    "    print(label, prediction)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DenseVector([53.793, -3.4514, -3.271, -1.9501, 0.1113, -0.8569, 0.0895, 1.5276, -2.755, 1.5095, 1.2773, -1.2846, -0.795, 1.5652, 3.5236, 1.5256, -0.8454, -1.1562, -1.0639, 0.5737, -0.4507, -0.1431, -1.7297, 1.4213, 1.5768, -1.5884, 4.26, -0.1054, 4.0529, -0.6732, 0.7465, 0.0164, -0.0078, -0.2661, 0.3984, -0.0136, -0.0142, 0.0518, -0.441, -0.0189, 0.1313, 0.9807, 0.0, -0.301, 1.1183, -0.3137, 0.1066, 0.1851, 0.1046, 0.2166, 0.2525, 0.2499, 0.0])"
     ]
    }
   ],
   "source": [
    "workhours_model_lm.weights"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Forecasting income levels of census respondents"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "Logistic regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/opt/spark/python/lib/pyspark.zip/pyspark/mllib/classification.py:313: UserWarning: Deprecated in 2.0.0. Use ml.classification.LogisticRegression or LogisticRegressionWithLBFGS."
     ]
    }
   ],
   "source": [
    "import pyspark.mllib.classification as cl\n",
    "\n",
    "income_model_lr = cl.LogisticRegressionWithSGD.train(final_data_income_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.0 0\n",
      "0.0 0\n",
      "0.0 0\n",
      "0.0 0\n",
      "0.0 0\n",
      "0.0 0\n",
      "0.0 0\n",
      "0.0 1\n",
      "0.0 0\n",
      "0.0 1"
     ]
    }
   ],
   "source": [
    "small_sample_income = sc.parallelize(final_data_income_test.take(10))\n",
    "\n",
    "# pull true labels and model predictions to the driver side by side\n",
    "labels = small_sample_income.map(lambda row: row.label).collect()\n",
    "predictions = income_model_lr.predict(\n",
    "    small_sample_income.map(lambda row: row.features)\n",
    ").collect()\n",
    "\n",
    "for label, prediction in zip(labels, predictions):\n",
    "    print(label, prediction)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.5"
     ]
    }
   ],
   "source": [
    "income_model_lr.threshold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DenseVector([0.236, 0.7628, 0.2321, 0.2129, 0.0594, 0.1149, -0.0103, -0.1193, 0.2547, -0.0603, -0.1274, 0.0879, 0.0011, -0.0314, -0.2971, -0.1015, 0.2153, 0.2254, 0.019, -0.02, 0.0304, -0.0272, 0.058, -0.0337, -0.0732, 0.1469, -0.1255, -0.0943, -0.164, 0.0358, -0.0447, -0.0872, -0.0007, 0.0, -0.016, -0.0023, 0.0018, -0.0056, 0.0046, -0.0052, 0.0117, -0.0255, 0.0, 0.011, -0.0537, -0.0049, -0.0549, 0.0066, -0.0076, 0.0079, 0.0305, -0.0332, 0.0])"
     ]
    }
   ],
   "source": [
    "income_model_lr.weights"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Support Vector Machines"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# train on the training split only -- fitting on the full\n",
    "# final_data_income would leak the test set evaluated below\n",
    "income_model_svm = cl.SVMWithSGD.train(\n",
    "    final_data_income_train\n",
    "    , miniBatchFraction=1/2.0\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.0 1\n",
      "0.0 0\n",
      "0.0 0\n",
      "0.0 0\n",
      "0.0 1\n",
      "0.0 0\n",
      "0.0 1\n",
      "0.0 1\n",
      "0.0 0\n",
      "0.0 1"
     ]
    }
   ],
   "source": [
    "# same 10-row sample as above, scored with the SVM model\n",
    "labels = small_sample_income.map(lambda row: row.label).collect()\n",
    "predictions = income_model_svm.predict(\n",
    "    small_sample_income.map(lambda row: row.features)\n",
    ").collect()\n",
    "\n",
    "for label, prediction in zip(labels, predictions):\n",
    "    print(label, prediction)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DenseVector([0.1301, 1.0672, 0.1607, 0.0979, 0.0426, 0.084, -0.0115, -0.0772, 0.1667, -0.1179, -0.0835, 0.0359, -0.0265, -0.0687, -0.1805, -0.0576, 0.4046, 0.2522, 0.0732, -0.0664, 0.0039, -0.0032, 0.0656, -0.0723, -0.0431, 0.0966, -0.1211, -0.04, -0.1584, 0.0206, -0.0343, -0.0386, 0.0121, -0.0044, 0.0074, 0.0041, 0.0119, 0.0001, 0.0066, -0.0049, 0.0065, -0.0118, 0.0, 0.0028, -0.0656, 0.0057, -0.1175, 0.0053, -0.0114, 0.0086, 0.0019, -0.0241, 0.0])"
     ]
    }
   ],
   "source": [
    "income_model_svm.weights"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Building clustering models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pyspark.mllib.clustering as clu\n",
    "\n",
    "# cluster on the feature vectors (element 1 of each (label, features) pair)\n",
    "features_rdd = final_data.map(lambda row: row[1])\n",
    "\n",
    "model_km = clu.KMeans.train(\n",
    "    features_rdd,\n",
    "    2,\n",
    "    initializationMode='random',\n",
    "    seed=666\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.153472872815\n",
      "0.122339061021"
     ]
    }
   ],
   "source": [
    "import sklearn.metrics as m\n",
    "\n",
    "# predict cluster assignments, then pull both the assignments and the\n",
    "# true labels to the driver for sklearn's clustering scores; distinct\n",
    "# names keep the RDD and the collected list from shadowing each other\n",
    "predicted_rdd = model_km.predict(\n",
    "    final_data.map(lambda row: row[1])\n",
    ")\n",
    "predicted = predicted_rdd.collect()\n",
    "\n",
    "true = final_data.map(lambda row: row[0]).collect()\n",
    "\n",
    "print(m.homogeneity_score(true, predicted))\n",
    "print(m.completeness_score(true, predicted))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Computing performance statistics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import pyspark.mllib.evaluation as ev"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "Regression metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# RegressionMetrics expects an RDD of (prediction, observation) pairs\n",
    "true_pred_reg = final_data_hours_test.map(\n",
    "    lambda row: (\n",
    "        float(workhours_model_lm.predict(row.features)), row.label\n",
    "    )\n",
    ")\n",
    "\n",
    "metrics_lm = ev.RegressionMetrics(true_pred_reg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "R^2:  -6.754451242767173\n",
      "Explained Variance:  1145.7421086452416\n",
      "meanAbsoluteError:  29.866629018908615"
     ]
    }
   ],
   "source": [
    "print('R^2: ', metrics_lm.r2)\n",
    "print('Explained Variance: ', metrics_lm.explainedVariance)\n",
    "print('meanAbsoluteError: ', metrics_lm.meanAbsoluteError)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Classification metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "areaUnderPR:  0.7050195379236808\n",
      "areaUnderROC:  0.7951114398791014"
     ]
    }
   ],
   "source": [
    "# (score, label) pairs for the logistic regression model on the test set\n",
    "true_pred_class_lr = final_data_income_test.map(\n",
    "    lambda row: (\n",
    "        float(income_model_lr.predict(row.features)), row.label\n",
    "    )\n",
    ")\n",
    "\n",
    "metrics_lr = ev.BinaryClassificationMetrics(true_pred_class_lr)\n",
    "\n",
    "print('areaUnderPR: ', metrics_lr.areaUnderPR)\n",
    "print('areaUnderROC: ', metrics_lr.areaUnderROC)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error = 0.26766639276910437"
     ]
    }
   ],
   "source": [
    "# true_pred_class_lr is built from final_data_income_test,\n",
    "# so this is the test (not training) error rate\n",
    "testErr = (\n",
    "    true_pred_class_lr\n",
    "    .filter(lambda lp: lp[0] != lp[1]).count()\n",
    "    / float(true_pred_class_lr.count())\n",
    ")\n",
    "print(\"Test Error = \" + str(testErr))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "areaUnderPR:  0.6911561138639338\n",
      "areaUnderROC:  0.7794154787088152"
     ]
    }
   ],
   "source": [
    "true_pred_class_svm = (\n",
    "    final_data_income_test\n",
    "    .map(lambda row: (\n",
    "        float(income_model_svm.predict(row.features))\n",
    "        , row.label))\n",
    ")\n",
    "\n",
    "metrics_svm = ev.BinaryClassificationMetrics(true_pred_class_svm)\n",
    "\n",
    "print('areaUnderPR: ', metrics_svm.areaUnderPR)\n",
    "print('areaUnderROC: ', metrics_svm.areaUnderROC)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error = 0.28358668857847164"
     ]
    }
   ],
   "source": [
    "# true_pred_class_svm is built from final_data_income_test,\n",
    "# so this is the test (not training) error rate\n",
    "testErr = (\n",
    "    true_pred_class_svm\n",
    "    .filter(lambda lp: lp[0] != lp[1]).count()\n",
    "    / float(true_pred_class_svm.count())\n",
    ")\n",
    "\n",
    "print(\"Test Error = \" + str(testErr))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Confusion matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[((0.0, 0.0), 4967), ((1.0, 1.0), 2163), ((0.0, 1.0), 196), ((1.0, 0.0), 2410)]"
     ]
    }
   ],
   "source": [
    "# confusion-matrix counts: key each (truth, prediction) pair and sum\n",
    "(\n",
    "    true_pred_class_lr\n",
    "    .map(lambda el: (el, 1))\n",
    "    .reduceByKey(lambda x, y: x + y)\n",
    "    .take(4)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[((0.0, 0.0), 4848), ((1.0, 1.0), 2127), ((0.0, 1.0), 232), ((1.0, 0.0), 2529)]"
     ]
    }
   ],
   "source": [
    "# confusion-matrix counts: key each (truth, prediction) pair and sum\n",
    "(\n",
    "    true_pred_class_svm\n",
    "    .map(lambda el: (el, 1))\n",
    "    .reduceByKey(lambda x, y: x + y)\n",
    "    .take(4)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PySpark",
   "language": "python",
   "name": "pysparkkernel"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "python",
    "version": 2
   },
   "mimetype": "text/x-python",
   "name": "pyspark",
   "pygments_lexer": "python2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
