{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pyspark.sql.types as typ\n",
    "from pyspark.ml import Pipeline\n",
    "import pyspark.ml.classification as cl\n",
    "import pyspark.ml.evaluation as ev\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "from pyspark.context import SparkContext,SparkConf\n",
    "from pyspark.sql.session import SparkSession"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create (or reuse) a local SparkContext and wrap it in a SparkSession.\n",
    "# getOrCreate avoids the \"Cannot run multiple SparkContexts\" error on a\n",
    "# full re-run: a second context is constructed further down this notebook.\n",
    "sc = SparkContext.getOrCreate(SparkConf().setMaster('local'))\n",
    "spark = SparkSession(sc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Column schema for the births dataset: every feature is an integer except\n",
    "# BIRTH_PLACE, which arrives as a string code.  Order matters: positions are\n",
    "# reused below (schema construction and labels[2:] for the feature columns).\n",
    "_column_names = [\n",
    "    'INFANT_ALIVE_AT_REPORT', 'BIRTH_PLACE', 'MOTHER_AGE_YEARS',\n",
    "    'FATHER_COMBINE_AGE', 'CIG_BEFORE', 'CIG_1_TRI', 'CIG_2_TRI',\n",
    "    'CIG_3_TRI', 'MOTHER_HEIGHT_IN', 'MOTHER_PRE_WEIGHT',\n",
    "    'MOTHER_DELIVERY_WEIGHT', 'MOTHER_WEIGHT_GAIN', 'DIABETES_PRE',\n",
    "    'DIABETES_GEST', 'HYP_TENS_PRE', 'HYP_TENS_GEST', 'PREV_BIRTH_PRETERM',\n",
    "]\n",
    "labels = [(name, typ.StringType() if name == 'BIRTH_PLACE' else typ.IntegerType())\n",
    "          for name in _column_names]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------------------+-----------+----------------+------------------+----------+---------+---------+---------+----------------+-----------------+----------------------+------------------+------------+-------------+------------+-------------+------------------+\n",
      "|INFANT_ALIVE_AT_REPORT|BIRTH_PLACE|MOTHER_AGE_YEARS|FATHER_COMBINE_AGE|CIG_BEFORE|CIG_1_TRI|CIG_2_TRI|CIG_3_TRI|MOTHER_HEIGHT_IN|MOTHER_PRE_WEIGHT|MOTHER_DELIVERY_WEIGHT|MOTHER_WEIGHT_GAIN|DIABETES_PRE|DIABETES_GEST|HYP_TENS_PRE|HYP_TENS_GEST|PREV_BIRTH_PRETERM|\n",
      "+----------------------+-----------+----------------+------------------+----------+---------+---------+---------+----------------+-----------------+----------------------+------------------+------------+-------------+------------+-------------+------------------+\n",
      "|                     0|          1|              29|                99|         0|        0|        0|        0|              99|              999|                   999|                99|           0|            0|           0|            0|                 0|\n",
      "|                     0|          1|              22|                29|         0|        0|        0|        0|              65|              180|                   198|                18|           0|            0|           0|            0|                 0|\n",
      "|                     0|          1|              38|                40|         0|        0|        0|        0|              63|              155|                   167|                12|           0|            0|           0|            0|                 0|\n",
      "+----------------------+-----------+----------------+------------------+----------+---------+---------+---------+----------------+-----------------+----------------------+------------------+------------+-------------+------------+-------------+------------------+\n",
      "only showing top 3 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Build a StructType from the (name, type) pairs; no column is nullable.\n",
    "schema = typ.StructType(\n",
    "    [typ.StructField(name, dtype, False) for name, dtype in labels]\n",
    ")\n",
    "\n",
    "# Load the gzipped CSV with the explicit schema (the header row is skipped).\n",
    "births = spark.read.csv('../data/births_transformed.csv.gz',\n",
    "                        header=True, schema=schema)\n",
    "\n",
    "births.show(3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the transformers and the estimator for the pipeline.\n",
    "# (pyspark.ml.classification is already imported as `cl` in the first cell;\n",
    "# the duplicate import that used to live here was removed.)\n",
    "import  pyspark.ml.feature as ft\n",
    "\n",
    "# BIRTH_PLACE is read as a string; cast it to an integer code first.\n",
    "births = births.withColumn('BIRTH_PLACE_INT', births['BIRTH_PLACE']\\\n",
    "    .cast(typ.IntegerType()))\n",
    "\n",
    "# One-hot encode the birth-place code.\n",
    "encoder = ft.OneHotEncoder(inputCol='BIRTH_PLACE_INT',\n",
    "                           outputCol='BIRTH_PLACE_VEC')\n",
    "\n",
    "# Assemble all numeric features plus the encoded vector into one column.\n",
    "featuresCreator = ft.VectorAssembler(\n",
    "    inputCols=[col[0] for col in labels[2:]] + [encoder.getOutputCol()],\n",
    "    outputCol='features'\n",
    ")\n",
    "\n",
    "# Logistic-regression estimator predicting infant survival.\n",
    "logistic = cl.LogisticRegression(maxIter=10,\n",
    "                                 regParam=0.01,\n",
    "                                 featuresCol=featuresCreator.getOutputCol(),\n",
    "                                 labelCol='INFANT_ALIVE_AT_REPORT')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assemble the pipeline (Pipeline is already imported in the first cell;\n",
    "# the duplicate import that used to live here was removed).\n",
    "pipeline = Pipeline(stages=[encoder, featuresCreator, logistic])\n",
    "\n",
    "# Split the data, fit on the training portion, then score the held-out split.\n",
    "birth_train, birth_test = births.randomSplit([0.7,0.3],seed=123)\n",
    "\n",
    "model = pipeline.fit(birth_train)\n",
    "test_model = model.transform(birth_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.7187355793173213\n",
      "0.6819691176245866\n"
     ]
    }
   ],
   "source": [
    "# Evaluate model performance (pyspark.ml.evaluation is already imported as\n",
    "# `ev` in the first cell; the duplicate import was removed).\n",
    "evaluator = ev.BinaryClassificationEvaluator(\n",
    "    rawPredictionCol='probability',\n",
    "    labelCol='INFANT_ALIVE_AT_REPORT'\n",
    ")\n",
    "\n",
    "print(evaluator.evaluate(test_model, {evaluator.metricName:'areaUnderROC'}))\n",
    "print(evaluator.evaluate(test_model, {evaluator.metricName:'areaUnderPR'}))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[Row(INFANT_ALIVE_AT_REPORT=0, BIRTH_PLACE='1', MOTHER_AGE_YEARS=12, FATHER_COMBINE_AGE=99, CIG_BEFORE=0, CIG_1_TRI=0, CIG_2_TRI=0, CIG_3_TRI=0, MOTHER_HEIGHT_IN=62, MOTHER_PRE_WEIGHT=145, MOTHER_DELIVERY_WEIGHT=152, MOTHER_WEIGHT_GAIN=7, DIABETES_PRE=0, DIABETES_GEST=0, HYP_TENS_PRE=0, HYP_TENS_GEST=0, PREV_BIRTH_PRETERM=0, BIRTH_PLACE_INT=1, BIRTH_PLACE_VEC=SparseVector(9, {1: 1.0}), features=SparseVector(24, {0: 12.0, 1: 99.0, 6: 62.0, 7: 145.0, 8: 152.0, 9: 7.0, 16: 1.0}), rawPrediction=DenseVector([1.101, -1.101]), probability=DenseVector([0.7504, 0.2496]), prediction=0.0)]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Save the (unfitted) pipeline definition to disk, overwriting any old copy.\n",
    "pipelinePath = './infant_oneHotEncoder_Logistic_Pipeline'\n",
    "pipeline.write().overwrite().save(pipelinePath)\n",
    "\n",
    "# Reload the pipeline and re-fit it as a round-trip sanity check.\n",
    "loadedPipeline = Pipeline.load(pipelinePath)\n",
    "loadedPipeline.fit(birth_train).transform(birth_test).take(1)\n",
    "\n",
    "\n",
    "# Save the fitted PipelineModel (stages with learned parameters).\n",
    "from pyspark.ml import PipelineModel\n",
    "\n",
    "modelPath = './infant_oneHotEncoder_LogisticPipelineModel'\n",
    "model.write().overwrite().save(modelPath)\n",
    "\n",
    "# Reload the fitted model and score the test split with it.\n",
    "loadedPipelineModel = PipelineModel.load(modelPath)\n",
    "test_reloadedModel = loadedPipelineModel.transform(birth_test)\n",
    "test_reloadedModel.take(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark import SparkContext, SparkConf\n",
    "from pyspark.mllib.clustering import KMeans\n",
    "from pyspark.mllib.feature import StandardScaler\n",
    "from collections import OrderedDict\n",
    "from numpy import array\n",
    "from math import sqrt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reuse the existing SparkContext if one is already running in this kernel;\n",
    "# calling SparkContext(conf=conf) directly would raise\n",
    "# \"Cannot run multiple SparkContexts at once\" after the cells above.\n",
    "conf = SparkConf().setAppName(\"KDDCup99\")\n",
    "sc = SparkContext.getOrCreate(conf=conf)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# KDD Cup 99 10% sample: one comma-separated connection record per line.\n",
    "# NOTE(review): relative path — assumes the kernel's working directory\n",
    "# contains data/Kmeans/; confirm before a fresh run.\n",
    "data_file = \"data/Kmeans/kddcup.data_10_percent_corrected\"\n",
    "raw_data = sc.textFile(data_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "smurf. 280790\n",
      "neptune. 107201\n",
      "normal. 97278\n",
      "back. 2203\n",
      "satan. 1589\n",
      "ipsweep. 1247\n",
      "portsweep. 1040\n",
      "warezclient. 1020\n",
      "teardrop. 979\n",
      "pod. 264\n",
      "nmap. 231\n",
      "guess_passwd. 53\n",
      "buffer_overflow. 30\n",
      "land. 21\n",
      "warezmaster. 20\n",
      "imap. 12\n",
      "rootkit. 10\n",
      "loadmodule. 9\n",
      "ftp_write. 8\n",
      "multihop. 7\n",
      "phf. 4\n",
      "perl. 3\n",
      "spy. 2\n"
     ]
    }
   ],
   "source": [
    "# Count occurrences of each connection label (last CSV field) and print\n",
    "# them from most to least frequent.\n",
    "labels = raw_data.map(lambda line: line.strip().split(\",\")[-1])\n",
    "label_counts = labels.countByValue()\n",
    "sorted_labels = OrderedDict(\n",
    "    sorted(label_counts.items(), key=lambda item: item[1], reverse=True))\n",
    "for label, count in sorted_labels.items():\n",
    "    print(label, count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_interaction(line):\n",
    "    \"\"\"Parse one network-interaction record.\n",
    "\n",
    "    Keeps the first field plus the fields from index 4 onward (dropping\n",
    "    the three symbolic columns and the trailing label) and returns a\n",
    "    (label, feature_vector) pair.\n",
    "    \"\"\"\n",
    "    fields = line.split(\",\")\n",
    "    numeric_fields = [fields[0]] + fields[4:-1]\n",
    "    return (fields[-1], array([float(value) for value in numeric_fields]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepare data for clustering input\n",
    "# the data contains non-numeric features, we want to exclude them since\n",
    "# k-means works with numeric features. These are the first three and the last\n",
    "# column in each data row\n",
    "# NOTE(review): parse_interaction keeps line_split[0] and drops indices 1-3,\n",
    "# so \"first three\" above means the three symbolic columns, not columns 0-2.\n",
    "parsed_data = raw_data.map(parse_interaction)\n",
    "parsed_data_values = parsed_data.values().cache()  # cached: reused across every k-means run below"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standardize features to zero mean and unit variance before clustering.\n",
    "standardizer = StandardScaler(withMean=True, withStd=True)\n",
    "standardizer_model = standardizer.fit(parsed_data_values)\n",
    "standardized_data_values = standardizer_model.transform(parsed_data_values)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def clustering_score(data, k):\n",
    "    \"\"\"Train a k-means model with `k` clusters and score it.\n",
    "\n",
    "    Returns a (k, model, score) tuple where score is the mean distance\n",
    "    of the points to their assigned centroids (lower is better).\n",
    "    \"\"\"\n",
    "    # `runs` was dropped: it has been ignored since Spark 1.6 and the\n",
    "    # parameter was removed from KMeans.train in later releases.\n",
    "    clusters = KMeans.train(data, k, maxIterations=10, initializationMode=\"random\")\n",
    "    result = (k, clusters, data.map(lambda datum: dist_to_centroid(datum, clusters)).mean())\n",
    "    print(\"Clustering score for k=%(k)d is %(score)f\" \\\n",
    "          % {\"k\": k, \"score\": result[2]})\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def dist_to_centroid(datum, clusters):\n",
    "    \"\"\"Return the Euclidean distance from `datum` to its cluster centroid.\"\"\"\n",
    "    centroid = clusters.centers[clusters.predict(datum)]\n",
    "    delta = centroid - datum\n",
    "    return sqrt(sum(component ** 2 for component in delta))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Upper bound (inclusive) for the k values searched in the sweep below.\n",
    "max_k = 25"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Calculating total in within cluster distance for different k values (20 to 25):\n"
     ]
    }
   ],
   "source": [
    "# Announce the k-value sweep; the loop below tries k = 10 .. max_k.\n",
    "# (The old comment said \"5 to 40\" and the message said \"20 to ...\" —\n",
    "# neither matched the actual range.)\n",
    "print(\"Calculating total within-cluster distance for different k values (10 to %(max_k)d):\" \\\n",
    "      % {\"max_k\": max_k})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Clustering score for k=10 is 1.075206\n",
      "Clustering score for k=11 is 1.066830\n",
      "Clustering score for k=12 is 1.003858\n",
      "Clustering score for k=13 is 0.909187\n",
      "Clustering score for k=14 is 1.020935\n",
      "Clustering score for k=15 is 0.994992\n",
      "Clustering score for k=16 is 1.015520\n",
      "Clustering score for k=17 is 0.953584\n",
      "Clustering score for k=18 is 0.821993\n",
      "Clustering score for k=19 is 0.886438\n",
      "Clustering score for k=20 is 0.772973\n",
      "Clustering score for k=21 is 0.914120\n",
      "Clustering score for k=22 is 0.654088\n",
      "Clustering score for k=23 is 0.794124\n",
      "Clustering score for k=24 is 0.887499\n",
      "Clustering score for k=25 is 0.901161\n"
     ]
    }
   ],
   "source": [
    "# Score every candidate k, collecting (k, model, score) tuples.\n",
    "scores = [clustering_score(standardized_data_values, k)\n",
    "          for k in range(10, max_k + 1)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "min_k:, 22\n"
     ]
    }
   ],
   "source": [
    "# Pick the k whose model achieved the lowest mean distance-to-centroid.\n",
    "min_k = min(scores, key=lambda x: x[2])[0]\n",
    "print(\"min_k:\", min_k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Best k value is 22\n"
     ]
    }
   ],
   "source": [
    "# Report the selected cluster count.\n",
    "print(\"Best k value is %d\" % min_k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Obtaining clustering result sample for k=22...\n"
     ]
    }
   ],
   "source": [
    "# Assign each standardized point to its cluster under the best model and\n",
    "# keep a ~5% random sample of \"cluster,feature1,feature2,...\" text lines\n",
    "# (standardized data is more appropriate for exploratory inspection).\n",
    "print(\"Obtaining clustering result sample for k=%(min_k)d...\" % {\"min_k\": min_k})\n",
    "best_model = min(scores, key=lambda x: x[2])[1]\n",
    "cluster_assignments_sample = (\n",
    "    standardized_data_values\n",
    "    .map(lambda datum: str(best_model.predict(datum)) + \",\" + \",\".join(map(str, datum)))\n",
    "    .sample(False, 0.05))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Saving sample to file...\n",
      "DONE!\n"
     ]
    }
   ],
   "source": [
    "# Save assignment sample to file\n",
    "# NOTE(review): saveAsTextFile presumably fails if the \"sample_standardized\"\n",
    "# directory already exists — delete it before re-running this cell; confirm.\n",
    "print(\"Saving sample to file...\")\n",
    "cluster_assignments_sample.saveAsTextFile(\"sample_standardized\")\n",
    "print(\"DONE!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[3, 2, 1]"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch check: min() with a key selects the row whose third element is\n",
    "# smallest — same mechanism used above to pick the best (k, model, score).\n",
    "a = [[1, 2, 3], [3, 2, 1], [2, 54, 7], [6, 7, 2]]\n",
    "min_num = min(a, key=lambda row: row[2])\n",
    "min_num"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
