{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PySpark数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+------+-----+--------+----+-------+------+--------+\n",
      "|number|class|language|math|english|physic|chemical|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "|   001|    1|     100|  87|     67|    83|      98|\n",
      "|   002|    2|      87|  81|     90|    83|      83|\n",
      "|   003|    3|      86|  91|     83|    89|      63|\n",
      "|   004|    2|      65|  87|     94|    73|      88|\n",
      "|   005|    1|      76|  62|     89|    81|      98|\n",
      "|   006|    3|      84|  82|     85|    73|      99|\n",
      "|   007|    3|      56|  76|     63|    72|      87|\n",
      "|   008|    1|      55|  62|     46|    78|      71|\n",
      "|   009|    2|      63|  72|     87|    98|      64|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from pyspark.sql import SparkSession\n",
    "\n",
    "# 创建dataframe\n",
    "spark = SparkSession.builder.appName('pyspark').getOrCreate()\n",
    "# 原始数据 \n",
    "test = spark.createDataFrame([('001','1',100,87,67,83,98), ('002','2',87,81,90,83,83), ('003','3',86,91,83,89,63),\n",
    "                            ('004','2',65,87,94,73,88), ('005','1',76,62,89,81,98), ('006','3',84,82,85,73,99),\n",
    "                            ('007','3',56,76,63,72,87), ('008','1',55,62,46,78,71), ('009','2',63,72,87,98,64)],\n",
    "                             ['number','class','language','math','english','physic','chemical'])\n",
    "test.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+------+-----+--------+----+-------+------+--------+\n",
      "|number|class|language|math|english|physic|chemical|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "|   002|    2|      87|  81|     90|    83|      83|\n",
      "|   003|    3|      86|  91|     83|    89|      63|\n",
      "|   004|    2|      65|  87|     94|    73|      88|\n",
      "|   006|    3|      84|  82|     85|    73|      99|\n",
      "|   007|    3|      56|  76|     63|    72|      87|\n",
      "|   009|    2|      63|  72|     87|    98|      64|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 查询class大于1的\n",
    "filter_1 = test.filter('class > 1')\n",
    "filter_1.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+------+-----+--------+----+-------+------+--------+\n",
      "|number|class|language|math|english|physic|chemical|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "|   003|    3|      86|  91|     83|    89|      63|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 查询language >90 或 math> 90\n",
    "filter_2 = test.filter('language > 1 and math > 90')\n",
    "filter_2.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PySpark数据统计"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 253,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+------+------+-----+---+------+-------+------+------+-----+----------+---------+\n",
      "|                Name|Type 1|Type 2|Total| HP|Attack|Defense|Sp Atk|Sp Def|Speed|Generation|Legendary|\n",
      "+--------------------+------+------+-----+---+------+-------+------+------+-----+----------+---------+\n",
      "|           Bulbasaur| Grass|Poison|  318| 45|    49|     49|    65|    65|   45|         1|    false|\n",
      "|             Ivysaur| Grass|Poison|  405| 60|    62|     63|    80|    80|   60|         1|    false|\n",
      "|            Venusaur| Grass|Poison|  525| 80|    82|     83|   100|   100|   80|         1|    false|\n",
      "|VenusaurMega Venu...| Grass|Poison|  625| 80|   100|    123|   122|   120|   80|         1|    false|\n",
      "|          Charmander|  Fire|  null|  309| 39|    52|     43|    60|    50|   65|         1|    false|\n",
      "|          Charmeleon|  Fire|  null|  405| 58|    64|     58|    80|    65|   80|         1|    false|\n",
      "|           Charizard|  Fire|Flying|  534| 78|    84|     78|   109|    85|  100|         1|    false|\n",
      "|CharizardMega Cha...|  Fire|Dragon|  634| 78|   130|    111|   130|    85|  100|         1|    false|\n",
      "|CharizardMega Cha...|  Fire|Flying|  634| 78|   104|     78|   159|   115|  100|         1|    false|\n",
      "|            Squirtle| Water|  null|  314| 44|    48|     65|    50|    64|   43|         1|    false|\n",
      "|           Wartortle| Water|  null|  405| 59|    63|     80|    65|    80|   58|         1|    false|\n",
      "|           Blastoise| Water|  null|  530| 79|    83|    100|    85|   105|   78|         1|    false|\n",
      "|BlastoiseMega Bla...| Water|  null|  630| 79|   103|    120|   135|   115|   78|         1|    false|\n",
      "|            Caterpie|   Bug|  null|  195| 45|    30|     35|    20|    20|   45|         1|    false|\n",
      "|             Metapod|   Bug|  null|  205| 50|    20|     55|    25|    25|   30|         1|    false|\n",
      "|          Butterfree|   Bug|Flying|  395| 60|    45|     50|    90|    80|   70|         1|    false|\n",
      "|              Weedle|   Bug|Poison|  195| 40|    35|     30|    20|    20|   50|         1|    false|\n",
      "|              Kakuna|   Bug|Poison|  205| 45|    25|     50|    25|    25|   35|         1|    false|\n",
      "|            Beedrill|   Bug|Poison|  395| 65|    90|     40|    45|    80|   75|         1|    false|\n",
      "|BeedrillMega Beed...|   Bug|Poison|  495| 65|   150|     40|    15|    80|  145|         1|    false|\n",
      "+--------------------+------+------+-----+---+------+-------+------+------+-----+----------+---------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark import SparkFiles\n",
    "import pandas as pd\n",
    "\n",
    "spark.sparkContext.addFile('https://cdn.coggle.club/Pokemon.csv')\n",
    "df = spark.read.csv(\"file://\"+SparkFiles.get(\"Pokemon.csv\"), header=True, inferSchema= True)\n",
    "df = df.withColumnRenamed('Sp. Atk', 'Sp Atk')\n",
    "df = df.withColumnRenamed('Sp. Def', 'Sp Def')\n",
    "df.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 261,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-----spark 分析各列的类型-----\n",
      "[('Name', 'string'), ('Type 1', 'string'), ('Type 2', 'string'), ('Total', 'int'), ('HP', 'int'), ('Attack', 'int'), ('Defense', 'int'), ('Sp Atk', 'int'), ('Sp Def', 'int'), ('Speed', 'int'), ('Generation', 'int'), ('Legendary', 'boolean')]\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql.functions import count_distinct\n",
    "# 保存df\n",
    "df.write.csv(path='./Pokemon.csv', header=True, sep=',', mode='overwrite')\n",
    "# 统计\n",
    "print('-----spark 分析各列的类型-----')\n",
    "print(df.dtypes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 262,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-----spark 分析各列的取值个数-----\n",
      "[Row(count(Name)=799)]\n",
      "[Row(count(Type 1)=18)]\n",
      "[Row(count(Type 2)=18)]\n",
      "[Row(count(Total)=200)]\n",
      "[Row(count(HP)=94)]\n",
      "[Row(count(Attack)=111)]\n",
      "[Row(count(Defense)=103)]\n",
      "[Row(count(Sp Atk)=105)]\n",
      "[Row(count(Sp Def)=92)]\n",
      "[Row(count(Speed)=108)]\n",
      "[Row(count(Generation)=6)]\n",
      "[Row(count(Legendary)=2)]\n"
     ]
    }
   ],
   "source": [
    "# 计算每列空值数目\n",
    "print('-----spark 分析各列的取值个数-----')\n",
    "for col in df.columns:\n",
    "    print(df.agg(count_distinct(df[col])).collect())\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 263,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Name \t with null values:  0\n",
      "Type 1 \t with null values:  0\n",
      "Type 2 \t with null values:  386\n",
      "Total \t with null values:  0\n",
      "HP \t with null values:  0\n",
      "Attack \t with null values:  0\n",
      "Defense \t with null values:  0\n",
      "Sp Atk \t with null values:  0\n",
      "Sp Def \t with null values:  0\n",
      "Speed \t with null values:  0\n",
      "Generation \t with null values:  0\n",
      "Legendary \t with null values:  0\n"
     ]
    }
   ],
   "source": [
    "# 计算每列空值数目\n",
    "for col in df.columns:\n",
    "    print(col, \"\\t\", \"with null values: \", \n",
    "    df.filter(df[col].isNull()).count())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PySpark分组聚合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+------+------+-----+---+------+-------+------+------+-----+----------+---------+\n",
      "|                Name|Type 1|Type 2|Total| HP|Attack|Defense|Sp Atk|Sp Def|Speed|Generation|Legendary|\n",
      "+--------------------+------+------+-----+---+------+-------+------+------+-----+----------+---------+\n",
      "|           Bulbasaur| Grass|Poison|  318| 45|    49|     49|    65|    65|   45|         1|    false|\n",
      "|             Ivysaur| Grass|Poison|  405| 60|    62|     63|    80|    80|   60|         1|    false|\n",
      "|            Venusaur| Grass|Poison|  525| 80|    82|     83|   100|   100|   80|         1|    false|\n",
      "|VenusaurMega Venu...| Grass|Poison|  625| 80|   100|    123|   122|   120|   80|         1|    false|\n",
      "|          Charmander|  Fire|  null|  309| 39|    52|     43|    60|    50|   65|         1|    false|\n",
      "|          Charmeleon|  Fire|  null|  405| 58|    64|     58|    80|    65|   80|         1|    false|\n",
      "|           Charizard|  Fire|Flying|  534| 78|    84|     78|   109|    85|  100|         1|    false|\n",
      "|CharizardMega Cha...|  Fire|Dragon|  634| 78|   130|    111|   130|    85|  100|         1|    false|\n",
      "|CharizardMega Cha...|  Fire|Flying|  634| 78|   104|     78|   159|   115|  100|         1|    false|\n",
      "|            Squirtle| Water|  null|  314| 44|    48|     65|    50|    64|   43|         1|    false|\n",
      "|           Wartortle| Water|  null|  405| 59|    63|     80|    65|    80|   58|         1|    false|\n",
      "|           Blastoise| Water|  null|  530| 79|    83|    100|    85|   105|   78|         1|    false|\n",
      "|BlastoiseMega Bla...| Water|  null|  630| 79|   103|    120|   135|   115|   78|         1|    false|\n",
      "|            Caterpie|   Bug|  null|  195| 45|    30|     35|    20|    20|   45|         1|    false|\n",
      "|             Metapod|   Bug|  null|  205| 50|    20|     55|    25|    25|   30|         1|    false|\n",
      "|          Butterfree|   Bug|Flying|  395| 60|    45|     50|    90|    80|   70|         1|    false|\n",
      "|              Weedle|   Bug|Poison|  195| 40|    35|     30|    20|    20|   50|         1|    false|\n",
      "|              Kakuna|   Bug|Poison|  205| 45|    25|     50|    25|    25|   35|         1|    false|\n",
      "|            Beedrill|   Bug|Poison|  395| 65|    90|     40|    45|    80|   75|         1|    false|\n",
      "|BeedrillMega Beed...|   Bug|Poison|  495| 65|   150|     40|    15|    80|  145|         1|    false|\n",
      "+--------------------+------+------+-----+---+------+-------+------+------+-----+----------+---------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark import SparkFiles\n",
    "import pandas as pd\n",
    "\n",
    "spark.sparkContext.addFile('https://cdn.coggle.club/Pokemon.csv')\n",
    "df = spark.read.csv(\"file://\"+SparkFiles.get(\"Pokemon.csv\"), header=True, inferSchema= True)\n",
    "df = df.withColumnRenamed('Sp. Atk', 'Sp Atk')\n",
    "df = df.withColumnRenamed('Sp. Def', 'Sp Def')\n",
    "df.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+-----------------+\n",
      "|  Type 1|          avg(HP)|\n",
      "+--------+-----------------+\n",
      "|   Water|          72.0625|\n",
      "|  Poison|            67.25|\n",
      "|   Steel|65.22222222222223|\n",
      "|    Rock|65.36363636363636|\n",
      "|     Ice|             72.0|\n",
      "|   Ghost|          64.4375|\n",
      "|   Fairy|74.11764705882354|\n",
      "| Psychic|70.63157894736842|\n",
      "|  Dragon|          83.3125|\n",
      "|  Flying|            70.75|\n",
      "|     Bug|56.88405797101449|\n",
      "|Electric|59.79545454545455|\n",
      "|    Fire|69.90384615384616|\n",
      "|  Ground|         73.78125|\n",
      "|    Dark|66.80645161290323|\n",
      "|Fighting|69.85185185185185|\n",
      "|   Grass|67.27142857142857|\n",
      "|  Normal|77.27551020408163|\n",
      "+--------+-----------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 统计Type 1下的HP均值\n",
    "df_group = df.groupby(['Type 1']).agg({\"HP\": \"mean\"}).withColumnRenamed(\"mean(HP)\", \"mean\")\n",
    "df_group.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SparkSQL基础语法"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用spark完成任务一的筛选"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+------+-----+--------+----+-------+------+--------+\n",
      "|number|class|language|math|english|physic|chemical|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "|   003|    3|      86|  91|     83|    89|      63|\n",
      "+------+-----+--------+----+-------+------+--------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from pyspark.sql import SparkSession\n",
    "\n",
    "spark = SparkSession \\\n",
    "    .builder \\\n",
    "    .appName('pyspark') \\\n",
    "    .getOrCreate()\n",
    "# 原始数据 \n",
    "test = spark.createDataFrame([('001','1',100,87,67,83,98), ('002','2',87,81,90,83,83), ('003','3',86,91,83,89,63),\n",
    "                            ('004','2',65,87,94,73,88), ('005','1',76,62,89,81,98), ('006','3',84,82,85,73,99),\n",
    "                            ('007','3',56,76,63,72,87), ('008','1',55,62,46,78,71), ('009','2',63,72,87,98,64)],                           ['number','class','language','math','english','physic','chemical'])\n",
    "\n",
    "test.registerTempTable('test')\n",
    "\n",
    "\n",
    "filter_1 = spark.sql('select * from test where class > 1')\n",
    "filter_2 = spark.sql('select * from test where language > 1 and math > 90')\n",
    "filter_2.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用Spark SQL完成任务2里面的统计（列可以不统计）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 195,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------+-----------+-----------+-----------+--------+------------+-------------+-----------+-----------+-----------+----------------+---------------+\n",
      "|count_Name|count_Type1|count_Type2|count_Total|count_HP|count_Attack|count_Defense|count_SpAtk|count_SpDef|count_Speed|count_Generation|count_Legendary|\n",
      "+----------+-----------+-----------+-----------+--------+------------+-------------+-----------+-----------+-----------+----------------+---------------+\n",
      "|       799|         18|         18|        200|      94|         111|          103|        105|         92|        108|               6|              2|\n",
      "+----------+-----------+-----------+-----------+--------+------------+-------------+-----------+-----------+-----------+----------------+---------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark import SparkFiles\n",
    "import pandas as pd\n",
    "\n",
    "spark.sparkContext.addFile('https://cdn.coggle.club/Pokemon.csv')\n",
    "df = spark.read.csv(\"file://\"+SparkFiles.get(\"Pokemon.csv\"), header=True, inferSchema= True)\n",
    "df = df.withColumnRenamed('Sp. Atk', 'SpAtk')\n",
    "df = df.withColumnRenamed('Sp. Def', 'SpDef')\n",
    "df = df.withColumnRenamed('Type 1', 'Type1')\n",
    "df = df.withColumnRenamed('Type 2', 'Type2')\n",
    "df.registerTempTable('Pokemon')\n",
    "\n",
    "\n",
    "spark.sql('select '+\n",
    "          'count(distinct Name) as count_Name, '+\n",
    "          'count(distinct Type1) as count_Type1, '+\n",
    "          'count(distinct Type2) as count_Type2, '+\n",
    "          'count(distinct Total) as count_Total, '+\n",
    "          'count(distinct HP) as count_HP, '+\n",
    "          'count(distinct Attack) as count_Attack, ' +\n",
    "          'count(distinct Defense) as count_Defense, ' +\n",
    "          'count(distinct SpAtk) as count_SpAtk, ' +\n",
    "          'count(distinct SpDef) as count_SpDef, ' +\n",
    "          'count(distinct Speed) as count_Speed, ' +\n",
    "          'count(distinct Generation) as count_Generation, ' +\n",
    "          'count(distinct Legendary) as count_Legendary ' +\n",
    "          'from Pokemon').show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用Spark SQL完成任务3的分组统计"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------+-----------------+\n",
      "|   Type1|         mean(HP)|\n",
      "+--------+-----------------+\n",
      "|   Water|          72.0625|\n",
      "|  Poison|            67.25|\n",
      "|   Steel|65.22222222222223|\n",
      "|    Rock|65.36363636363636|\n",
      "|     Ice|             72.0|\n",
      "|   Ghost|          64.4375|\n",
      "|   Fairy|74.11764705882354|\n",
      "| Psychic|70.63157894736842|\n",
      "|  Dragon|          83.3125|\n",
      "|  Flying|            70.75|\n",
      "|     Bug|56.88405797101449|\n",
      "|Electric|59.79545454545455|\n",
      "|    Fire|69.90384615384616|\n",
      "|  Ground|         73.78125|\n",
      "|    Dark|66.80645161290323|\n",
      "|Fighting|69.85185185185185|\n",
      "|   Grass|67.27142857142857|\n",
      "|  Normal|77.27551020408163|\n",
      "+--------+-----------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark import SparkFiles\n",
    "import pandas as pd\n",
    "\n",
    "spark.sparkContext.addFile('https://cdn.coggle.club/Pokemon.csv')\n",
    "df = spark.read.csv(\"file://\"+SparkFiles.get(\"Pokemon.csv\"), header=True, inferSchema= True)\n",
    "df = df.withColumnRenamed('Sp. Atk', 'Sp Atk')\n",
    "df = df.withColumnRenamed('Sp. Def', 'Sp Def')\n",
    "\n",
    "df = df.withColumnRenamed('Type 1', 'Type1')\n",
    "df = df.withColumnRenamed('Type 2', 'Type2')\n",
    "df.registerTempTable('Pokemon')\n",
    "\n",
    "spark.sql('select Type1, mean(HP) from Pokemon where 1=1 group by Type1').show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SparkML基础：数据编码\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 228,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+------+------+-----+---+------+-------+-----+-----+-----+----------+---------+\n",
      "|                Name| Type1| Type2|Total| HP|Attack|Defense|SpAtk|SpDef|Speed|Generation|Legendary|\n",
      "+--------------------+------+------+-----+---+------+-------+-----+-----+-----+----------+---------+\n",
      "|           Bulbasaur| Grass|Poison|  318| 45|    49|     49|   65|   65|   45|         1|    false|\n",
      "|             Ivysaur| Grass|Poison|  405| 60|    62|     63|   80|   80|   60|         1|    false|\n",
      "|            Venusaur| Grass|Poison|  525| 80|    82|     83|  100|  100|   80|         1|    false|\n",
      "|VenusaurMega Venu...| Grass|Poison|  625| 80|   100|    123|  122|  120|   80|         1|    false|\n",
      "|           Charizard|  Fire|Flying|  534| 78|    84|     78|  109|   85|  100|         1|    false|\n",
      "|CharizardMega Cha...|  Fire|Dragon|  634| 78|   130|    111|  130|   85|  100|         1|    false|\n",
      "|CharizardMega Cha...|  Fire|Flying|  634| 78|   104|     78|  159|  115|  100|         1|    false|\n",
      "|          Butterfree|   Bug|Flying|  395| 60|    45|     50|   90|   80|   70|         1|    false|\n",
      "|              Weedle|   Bug|Poison|  195| 40|    35|     30|   20|   20|   50|         1|    false|\n",
      "|              Kakuna|   Bug|Poison|  205| 45|    25|     50|   25|   25|   35|         1|    false|\n",
      "|            Beedrill|   Bug|Poison|  395| 65|    90|     40|   45|   80|   75|         1|    false|\n",
      "|BeedrillMega Beed...|   Bug|Poison|  495| 65|   150|     40|   15|   80|  145|         1|    false|\n",
      "|              Pidgey|Normal|Flying|  251| 40|    45|     40|   35|   35|   56|         1|    false|\n",
      "|           Pidgeotto|Normal|Flying|  349| 63|    60|     55|   50|   50|   71|         1|    false|\n",
      "|             Pidgeot|Normal|Flying|  479| 83|    80|     75|   70|   70|  101|         1|    false|\n",
      "| PidgeotMega Pidgeot|Normal|Flying|  579| 83|    80|     80|  135|   80|  121|         1|    false|\n",
      "|             Spearow|Normal|Flying|  262| 40|    60|     30|   31|   31|   70|         1|    false|\n",
      "|              Fearow|Normal|Flying|  442| 65|    90|     65|   61|   61|  100|         1|    false|\n",
      "|           Nidoqueen|Poison|Ground|  505| 90|    92|     87|   75|   85|   76|         1|    false|\n",
      "|            Nidoking|Poison|Ground|  505| 81|   102|     77|   85|   75|   85|         1|    false|\n",
      "+--------------------+------+------+-----+---+------+-------+-----+-----+-----+----------+---------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark import SparkFiles\n",
    "\n",
    "from pyspark.ml.feature import OneHotEncoder, StringIndexer, MinMaxScaler, VectorAssembler, PCA\n",
    "from pyspark.ml import Pipeline\n",
    "import pandas as pd\n",
    "\n",
    "spark.sparkContext.addFile('https://cdn.coggle.club/Pokemon.csv')\n",
    "df = spark.read.csv(\"file://\"+SparkFiles.get(\"Pokemon.csv\"), header=True, inferSchema= True)\n",
    "df = df.withColumnRenamed('Sp. Atk', 'SpAtk')\n",
    "df = df.withColumnRenamed('Sp. Def', 'SpDef')\n",
    "df = df.withColumnRenamed('Type 1', 'Type1')\n",
    "df = df.withColumnRenamed('Type 2', 'Type2')\n",
    "\n",
    "df = df.dropna()\n",
    "df.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 229,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+\n",
      "|        pca_features|\n",
      "+--------------------+\n",
      "|[-0.1930805207831...|\n",
      "|[-0.2007053240188...|\n",
      "|[-0.2121337642716...|\n",
      "|[-0.2457735826594...|\n",
      "|[0.86414775884762...|\n",
      "|[-0.0937455417718...|\n",
      "|[0.83966939811228...|\n",
      "|[0.92492388312621...|\n",
      "|[-0.0235782644636...|\n",
      "|[-0.0450376994523...|\n",
      "|[-0.0469808506442...|\n",
      "|[-0.0013216740783...|\n",
      "|[1.18566428572304...|\n",
      "|[1.17616847021555...|\n",
      "|[1.17226905243645...|\n",
      "|[1.16684386810112...|\n",
      "|[1.19937996986309...|\n",
      "|[1.18044995011151...|\n",
      "|[-0.1706319415250...|\n",
      "|[-0.1602369846373...|\n",
      "+--------------------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "    onthot\n",
    "'''\n",
    "# 字符串转数字\n",
    "stringIndexer = StringIndexer(inputCols=['Type1', 'Type2'], outputCols=['Type1Idx', 'Type2Idx'])\n",
    "# 创建encoder模型\n",
    "encoder = OneHotEncoder(inputCols=['Type1Idx', 'Type2Idx', 'Generation'],\n",
    "                        outputCols=['Type1Vec', 'Type2Vec', 'GenerationVec'])\n",
    "\n",
    "'''\n",
    "    minmax\n",
    "'''\n",
    "\n",
    "# 转换对象类型\n",
    "total_vecAssembler = VectorAssembler(inputCols=['Total'], outputCol='Total_new')\n",
    "hp_vecAssembler = VectorAssembler(inputCols=['HP'], outputCol='HP_new')\n",
    "attack_vecAssembler = VectorAssembler(inputCols=['Attack'], outputCol='Attack_new')\n",
    "defense_vecAssembler = VectorAssembler(inputCols=['Defense'], outputCol='Defense_new')\n",
    "spAtk_vecAssembler = VectorAssembler(inputCols=['SpAtk'], outputCol='SpAtk_new')\n",
    "spDef_vecAssembler = VectorAssembler(inputCols=['SpDef'], outputCol='SpDef_new')\n",
    "speed_vecAssembler = VectorAssembler(inputCols=['Speed'], outputCol='Speed_new')\n",
    "\n",
    "# 创建归一化模型\n",
    "total_scaler = MinMaxScaler(inputCol='Total_new', outputCol='TotalMinMax')\n",
    "hp_scaler = MinMaxScaler(inputCol='HP_new', outputCol='HPMinMax')\n",
    "attack_scaler = MinMaxScaler(inputCol='Attack_new', outputCol='AttackMinMax')\n",
    "defense_scaler = MinMaxScaler(inputCol='Defense_new', outputCol='DefenseMinMax')\n",
    "spAtk_scaler = MinMaxScaler(inputCol='SpAtk_new', outputCol='SpAtkMinMax')\n",
    "spDef_scaler = MinMaxScaler(inputCol='SpDef_new', outputCol='SpDefMinMax')\n",
    "speed_scaler = MinMaxScaler(inputCol='Speed_new', outputCol='SpeedMinMax')\n",
    "\n",
    "'''\n",
    "    pipeline\n",
    "'''\n",
    "\n",
    "pipeline = Pipeline(stages=[\n",
    "    stringIndexer,encoder,\n",
    "    total_vecAssembler, hp_vecAssembler, attack_vecAssembler, defense_vecAssembler, spAtk_vecAssembler, spDef_vecAssembler, speed_vecAssembler,\n",
    "    total_scaler, hp_scaler, attack_scaler, defense_scaler, spAtk_scaler, spDef_scaler, speed_scaler,\n",
    "    ])\n",
    "\n",
    "\n",
    "model = pipeline.fit(df)\n",
    "total_df = model.transform(df)\n",
    "\n",
    "'''\n",
    "    pca\n",
    "'''\n",
    "# 将所有特征合并\n",
    "total_df = total_df.select('Type1Vec','Type2Vec','TotalMinMax','HPMinMax','AttackMinMax','DefenseMinMax','SpAtkMinMax','SpDefMinMax','SpeedMinMax')\n",
    "total_vecAssembler = VectorAssembler(inputCols=['Type1Vec','Type2Vec','TotalMinMax','HPMinMax','AttackMinMax','DefenseMinMax','SpAtkMinMax','SpDefMinMax','SpeedMinMax'], \n",
    "                                     outputCol='features')\n",
    "total_df = total_vecAssembler.transform(total_df)\n",
    "\n",
    "total_df = total_df.select('features')\n",
    "\n",
    "# pca降维，暂定5维\n",
    "pca = PCA(k=5, inputCol='features')\n",
    "pca.setOutputCol(\"pca_features\")\n",
    "model = pca.fit(total_df)\n",
    "\n",
    "final_df = model.transform(total_df)\n",
    "\n",
    "final_df.select('pca_features').show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SparkML基础：分类模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 230,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------+\n",
      "|count_Name|\n",
      "+----------+\n",
      "|        18|\n",
      "+----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "newData = final_df.select('pca_features').join(df.select('Type1'))\n",
    "# 字符串转数字\n",
    "stringIndexer = StringIndexer(inputCols=['Type1'], outputCols=['Type1Idx'])\n",
    "model = stringIndexer.fit(newData)\n",
    "newData = model.transform(newData)\n",
    "\n",
    "\n",
    "newData.registerTempTable('tmp')\n",
    "\n",
    "\n",
    "spark.sql('select '+\n",
    "          'count(distinct Type1Idx) as count_Name '\n",
    "          'from tmp').show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "假设type1作为标签，任务将是一个多分类任务\n",
    "\n",
    "适合使用f1作为评分标准"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 231,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 决策树的训练方式\n",
    "from pyspark.ml.classification import DecisionTreeClassifier\n",
    "dt = DecisionTreeClassifier(maxDepth=5, labelCol='Type1Idx', featuresCol='pca_features')\n",
    "model = dt.fit(newData)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 232,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GBT的训练方式\n",
    "from pyspark.ml.classification import RandomForestClassifier\n",
    "rf = RandomForestClassifier(maxDepth=5, labelCol='Type1Idx', featuresCol='pca_features')\n",
    "model = rf.fit(newData)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 233,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 逻辑回归的分类方式\n",
    "from pyspark.ml.classification import LogisticRegression\n",
    "nb = LogisticRegression(labelCol='Type1Idx', featuresCol='pca_features')\n",
    "model = nb.fit(newData)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SparkML基础：聚类模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 234,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+-----+--------+\n",
      "|        pca_features|Type1|Type1Idx|\n",
      "+--------------------+-----+--------+\n",
      "|[-0.1930805207831...|Grass|     2.0|\n",
      "|[-0.2007053240188...|Grass|     2.0|\n",
      "|[-0.2121337642716...|Grass|     2.0|\n",
      "|[-0.2457735826594...|Grass|     2.0|\n",
      "|[0.86414775884762...|Grass|     2.0|\n",
      "|[-0.0937455417718...|Grass|     2.0|\n",
      "|[0.83966939811228...|Grass|     2.0|\n",
      "|[0.92492388312621...|Grass|     2.0|\n",
      "|[-0.0235782644636...|Grass|     2.0|\n",
      "|[-0.0450376994523...|Grass|     2.0|\n",
      "|[-0.0469808506442...|Grass|     2.0|\n",
      "|[-0.0013216740783...|Grass|     2.0|\n",
      "|[1.18566428572304...|Grass|     2.0|\n",
      "|[1.17616847021555...|Grass|     2.0|\n",
      "|[1.17226905243645...|Grass|     2.0|\n",
      "|[1.16684386810112...|Grass|     2.0|\n",
      "|[1.19937996986309...|Grass|     2.0|\n",
      "|[1.18044995011151...|Grass|     2.0|\n",
      "|[-0.1706319415250...|Grass|     2.0|\n",
      "|[-0.1602369846373...|Grass|     2.0|\n",
      "+--------------------+-----+--------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql.functions import monotonically_increasing_id\n",
    "\n",
    "# BUG FIX: DataFrame.join() with no key is a cartesian product, so\n",
    "# 'pca_features' and 'Type1' were not guaranteed row-aligned. Give both\n",
    "# sides an explicit row id and join on it.\n",
    "# NOTE(review): monotonically_increasing_id only lines up when both frames\n",
    "# share the same partitioning/order -- TODO confirm; the robust fix is to\n",
    "# carry 'Type1' along in the frame that produced 'pca_features'.\n",
    "left = final_df.select('pca_features').withColumn('_rid', monotonically_increasing_id())\n",
    "right = df.select('Type1').withColumn('_rid', monotonically_increasing_id())\n",
    "newData = left.join(right, on='_rid').drop('_rid')\n",
    "# Encode the string label as a numeric index for the classifier\n",
    "stringIndexer = StringIndexer(inputCols=['Type1'], outputCols=['Type1Idx'])\n",
    "model = stringIndexer.fit(newData)\n",
    "newData = model.transform(newData)\n",
    "newData.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 247,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 20/20 [01:10<00:00,  3.50s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最优聚类数： 12\n",
      "{2: 0.44012933511064295, 3: 0.44639496159980074, 4: 0.576105884770639, 5: 0.49090600461867884, 6: 0.37567988014506515, 7: 0.5539762625697647, 8: 0.5084661919666779, 9: 0.5052825404621821, 10: 0.4910631525170517, 11: 0.5898168430008117, 12: 0.6000516677645601, 13: 0.5267450758961985, 14: 0.5813612046787775, 15: 0.5916289998556884, 16: 0.5854780984408632, 17: 0.5631945185332683, 18: 0.56192627419674, 19: 0.5215151182734858, 20: 0.5382877542710011, 21: 0.5299777038148125}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.ml.evaluation import ClusteringEvaluator\n",
    "from pyspark.ml.clustering import KMeans\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Search k in [2, 21] by silhouette score (higher is better).\n",
    "# KMeans is randomly initialised, so fix a seed to make the chosen\n",
    "# 'best k' reproducible across re-runs of the notebook.\n",
    "SEED = 42\n",
    "# The evaluator carries no per-k state, so build it once outside the loop.\n",
    "evaluator = ClusteringEvaluator(featuresCol='pca_features', predictionCol='prediction')\n",
    "\n",
    "k_result = {}\n",
    "for k in tqdm(range(2, 22)):\n",
    "    kmeans = KMeans(k=k, featuresCol='pca_features', seed=SEED)\n",
    "    model = kmeans.fit(newData)\n",
    "    kmeans_result = model.transform(newData)\n",
    "    k_result[k] = evaluator.evaluate(kmeans_result)\n",
    "\n",
    "# Pick the k with the highest silhouette score.\n",
    "best_k = max(k_result, key=k_result.get)\n",
    "print('最优聚类数：', best_k)\n",
    "print(k_result)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Spark RDD"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.6/site-packages/pyspark/context.py:238: FutureWarning: Python 3.6 support is deprecated in Spark 3.2.\n",
      "  FutureWarning\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+--------------------+-----+------+-----+---+------+-------+-----+-----+-----+----------+---------+\n",
      "|                Name|Type1| Type2|Total| HP|Attack|Defense|SpAtk|SpDef|Speed|Generation|Legendary|\n",
      "+--------------------+-----+------+-----+---+------+-------+-----+-----+-----+----------+---------+\n",
      "|           Bulbasaur|Grass|Poison|  318| 45|    49|     49|   65|   65|   45|         1|    false|\n",
      "|             Ivysaur|Grass|Poison|  405| 60|    62|     63|   80|   80|   60|         1|    false|\n",
      "|            Venusaur|Grass|Poison|  525| 80|    82|     83|  100|  100|   80|         1|    false|\n",
      "|VenusaurMega Venu...|Grass|Poison|  625| 80|   100|    123|  122|  120|   80|         1|    false|\n",
      "|          Charmander| Fire|  null|  309| 39|    52|     43|   60|   50|   65|         1|    false|\n",
      "|          Charmeleon| Fire|  null|  405| 58|    64|     58|   80|   65|   80|         1|    false|\n",
      "|           Charizard| Fire|Flying|  534| 78|    84|     78|  109|   85|  100|         1|    false|\n",
      "|CharizardMega Cha...| Fire|Dragon|  634| 78|   130|    111|  130|   85|  100|         1|    false|\n",
      "|CharizardMega Cha...| Fire|Flying|  634| 78|   104|     78|  159|  115|  100|         1|    false|\n",
      "|            Squirtle|Water|  null|  314| 44|    48|     65|   50|   64|   43|         1|    false|\n",
      "|           Wartortle|Water|  null|  405| 59|    63|     80|   65|   80|   58|         1|    false|\n",
      "|           Blastoise|Water|  null|  530| 79|    83|    100|   85|  105|   78|         1|    false|\n",
      "|BlastoiseMega Bla...|Water|  null|  630| 79|   103|    120|  135|  115|   78|         1|    false|\n",
      "|            Caterpie|  Bug|  null|  195| 45|    30|     35|   20|   20|   45|         1|    false|\n",
      "|             Metapod|  Bug|  null|  205| 50|    20|     55|   25|   25|   30|         1|    false|\n",
      "|          Butterfree|  Bug|Flying|  395| 60|    45|     50|   90|   80|   70|         1|    false|\n",
      "|              Weedle|  Bug|Poison|  195| 40|    35|     30|   20|   20|   50|         1|    false|\n",
      "|              Kakuna|  Bug|Poison|  205| 45|    25|     50|   25|   25|   35|         1|    false|\n",
      "|            Beedrill|  Bug|Poison|  395| 65|    90|     40|   45|   80|   75|         1|    false|\n",
      "|BeedrillMega Beed...|  Bug|Poison|  495| 65|   150|     40|   15|   80|  145|         1|    false|\n",
      "+--------------------+-----+------+-----+---+------+-------+-----+-----+-----+----------+---------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark import SparkFiles\n",
    "import pandas as pd\n",
    "\n",
    "# Fetch the CSV through SparkFiles and read it as a typed DataFrame\n",
    "spark = SparkSession.builder.appName('pyspark').getOrCreate()\n",
    "spark.sparkContext.addFile('https://cdn.coggle.club/Pokemon.csv')\n",
    "df = spark.read.csv(\"file://\" + SparkFiles.get(\"Pokemon.csv\"), header=True, inferSchema=True)\n",
    "\n",
    "# Normalise the column names that contain spaces / dots\n",
    "renames = {'Sp. Atk': 'SpAtk', 'Sp. Def': 'SpDef', 'Type 1': 'Type1', 'Type 2': 'Type2'}\n",
    "for old_name, new_name in renames.items():\n",
    "    df = df.withColumnRenamed(old_name, new_name)\n",
    "df.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "rdd = df.rdd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------- Name ----------\n",
      "不同的取值个数: 799\n",
      "空值个数: 0\n",
      "---------- Type1 ----------\n",
      "不同的取值个数: 18\n",
      "空值个数: 0\n",
      "---------- Type2 ----------\n",
      "不同的取值个数: 19\n",
      "空值个数: 386\n",
      "---------- Total ----------\n",
      "不同的取值个数: 200\n",
      "空值个数: 0\n",
      "---------- HP ----------\n",
      "不同的取值个数: 94\n",
      "空值个数: 0\n",
      "---------- Attack ----------\n",
      "不同的取值个数: 111\n",
      "空值个数: 0\n",
      "---------- Defense ----------\n",
      "不同的取值个数: 103\n",
      "空值个数: 0\n",
      "---------- SpAtk ----------\n",
      "不同的取值个数: 105\n",
      "空值个数: 0\n",
      "---------- SpDef ----------\n",
      "不同的取值个数: 92\n",
      "空值个数: 0\n",
      "---------- Speed ----------\n",
      "不同的取值个数: 108\n",
      "空值个数: 0\n",
      "---------- Generation ----------\n",
      "不同的取值个数: 6\n",
      "空值个数: 0\n",
      "---------- Legendary ----------\n",
      "不同的取值个数: 2\n",
      "空值个数: 0\n"
     ]
    }
   ],
   "source": [
    "# Per-column distinct-value and null counts via the RDD API\n",
    "for i, col_name in enumerate(df.columns):\n",
    "    print('-'*10, col_name, '-'*10)\n",
    "    # countByValue returns a {value: count} map; its length is the distinct count\n",
    "    print('不同的取值个数:', len(rdd.map(lambda x, i=i: x[i]).countByValue()))\n",
    "    # identity test 'is None' instead of '== None' (PEP 8; avoids __eq__ surprises);\n",
    "    # 'i=i' binds the current index into the shipped closure explicitly\n",
    "    print('空值个数:', rdd.filter(lambda x, i=i: x[i] is None).count())\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Spark Streaming"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<pyspark.streaming.dstream.TransformedDStream object at 0x7f6fd57eb710>\n"
     ]
    }
   ],
   "source": [
    "from pyspark.streaming import StreamingContext\n",
    "\n",
    "from pyspark.sql import SparkSession\n",
    "from pyspark import SparkFiles\n",
    "import pandas as pd\n",
    "\n",
    "spark = SparkSession.builder.appName(\"CrossCorrelation\").getOrCreate()\n",
    "# 1-second micro-batch interval\n",
    "ssc = StreamingContext(spark.sparkContext, 1)\n",
    "\n",
    "# NOTE(review): textFileStream monitors a *directory* on a Hadoop-compatible\n",
    "# filesystem for newly created files; an HTTP URL will never produce data.\n",
    "# Point this at a local/HDFS directory instead -- confirm intended source.\n",
    "ds = ssc.textFileStream('https://cdn.coggle.club/Pokemon.csv')\n",
    "\n",
    "# A DStream is lazy: print(ds.countByValue()) only shows the object repr\n",
    "# (as the captured output above demonstrates). Register an output operation\n",
    "# instead; results will appear once ssc.start() is called, which is deferred\n",
    "# until the source path above is fixed.\n",
    "ds.countByValue().pprint()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
