{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "from pyspark.sql.types import *\n",
    "from pyspark.sql.functions import *"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 本地创建数据\n",
    "\n",
    "```python\n",
    "spark = SparkSession.builder.appName(\"SparkExample\").master('local').getOrCreate()\n",
    "rdd = spark.sparkContext.parallelize([('tom', 20), ('jack', 16)])\n",
    "\n",
    "df = rdd.toDF(['name', 'age'])\n",
    "\n",
    "df.printSchema()\n",
    "df.show()\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2022-06-18 16:26:43 WARN  Utils:66 - Your hostname, gpu2 resolves to a loopback address: 127.0.1.1; using 10.132.73.149 instead (on interface eno2)\n",
      "2022-06-18 16:26:43 WARN  Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: An illegal reflective access operation has occurred\n",
      "WARNING: Illegal reflective access by org.apache.hadoop.security.authentication.util.KerberosUtil (file:/home/guoruiming/miniconda3/envs/pyspark_env/lib/python3.7/site-packages/pyspark/jars/hadoop-auth-2.7.3.jar) to method sun.security.krb5.Config.getInstance()\n",
      "WARNING: Please consider reporting this to the maintainers of org.apache.hadoop.security.authentication.util.KerberosUtil\n",
      "WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations\n",
      "WARNING: All illegal access operations will be denied in a future release\n",
      "Setting default log level to \"WARN\".\n",
      "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2022-06-18 16:26:43 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n"
     ]
    }
   ],
   "source": [
    "spark = SparkSession.builder.master('local').appName(\"HelloSpark\").getOrCreate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 2.1 从数据源读取数据\n",
    "df = spark.read.option('header', True).option('inferSchema', True).csv(\"BeijingPM20100101_20151231.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- No: integer (nullable = true)\n",
      " |-- year: integer (nullable = true)\n",
      " |-- month: integer (nullable = true)\n",
      " |-- day: integer (nullable = true)\n",
      " |-- hour: integer (nullable = true)\n",
      " |-- season: integer (nullable = true)\n",
      " |-- PM_Dongsi: string (nullable = true)\n",
      " |-- PM_Dongsihuan: string (nullable = true)\n",
      " |-- PM_Nongzhanguan: string (nullable = true)\n",
      " |-- PM_US_Post: string (nullable = true)\n",
      " |-- DEWP: string (nullable = true)\n",
      " |-- HUMI: string (nullable = true)\n",
      " |-- PRES: string (nullable = true)\n",
      " |-- TEMP: string (nullable = true)\n",
      " |-- cbwd: string (nullable = true)\n",
      " |-- Iws: string (nullable = true)\n",
      " |-- precipitation: string (nullable = true)\n",
      " |-- Iprec: string (nullable = true)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 2.2 print schema\n",
    "df.printSchema()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+----+-----+---+----+------+---------+-------------+---------------+----------+----+----+----+----+----+-----+-------------+-----+\n",
      "| No|year|month|day|hour|season|PM_Dongsi|PM_Dongsihuan|PM_Nongzhanguan|PM_US_Post|DEWP|HUMI|PRES|TEMP|cbwd|  Iws|precipitation|Iprec|\n",
      "+---+----+-----+---+----+------+---------+-------------+---------------+----------+----+----+----+----+----+-----+-------------+-----+\n",
      "|  1|2010|    1|  1|   0|     4|       NA|           NA|             NA|        NA| -21|  43|1021| -11|  NW| 1.79|            0|    0|\n",
      "|  2|2010|    1|  1|   1|     4|       NA|           NA|             NA|        NA| -21|  47|1020| -12|  NW| 4.92|            0|    0|\n",
      "|  3|2010|    1|  1|   2|     4|       NA|           NA|             NA|        NA| -21|  43|1019| -11|  NW| 6.71|            0|    0|\n",
      "|  4|2010|    1|  1|   3|     4|       NA|           NA|             NA|        NA| -21|  55|1019| -14|  NW| 9.84|            0|    0|\n",
      "|  5|2010|    1|  1|   4|     4|       NA|           NA|             NA|        NA| -20|  51|1018| -12|  NW|12.97|            0|    0|\n",
      "+---+----+-----+---+----+------+---------+-------------+---------------+----------+----+----+----+----+----+-----+-------------+-----+\n",
      "only showing top 5 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 2.3 show top 5 data\n",
    "df.show(5, truncate=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DataFrame[year: int, month: int, PM_Dongsi: string]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 2.4 使用 select 选取需要的列\n",
    "clean_df = df.select(\"year\", \"month\", \"PM_Dongsi\")\n",
    "clean_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----+-----+-----+\n",
      "|year|month|count|\n",
      "+----+-----+-----+\n",
      "|2015|    2|  666|\n",
      "|2014|    4|  698|\n",
      "|2015|   12|  732|\n",
      "|2013|    2|  641|\n",
      "|2014|   10|  735|\n",
      "|2014|   12|  712|\n",
      "|2013|    9|  694|\n",
      "|2013|   10|  682|\n",
      "|2014|    5|  710|\n",
      "|2013|   12|  727|\n",
      "|2014|    1|  723|\n",
      "|2013|    3|  722|\n",
      "|2014|    8|  733|\n",
      "|2013|    6|  613|\n",
      "|2015|    4|  709|\n",
      "|2015|    8|  737|\n",
      "|2015|   11|  706|\n",
      "|2015|    9|  710|\n",
      "|2013|    7|  649|\n",
      "|2014|    9|  716|\n",
      "+----+-----+-----+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 2.5 groupby Pandas 方式\n",
    "year_month_count_df = clean_df.where(\"PM_Dongsi != 'NA'\").groupBy('year', 'month').count()\n",
    "year_month_count_df.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----+-----+---------+\n",
      "|year|month|PM_Dongsi|\n",
      "+----+-----+---------+\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "|2010|    1|       NA|\n",
      "+----+-----+---------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 2.6 SQL 方式：注册临时表并查询（groupby 见下一个单元格）\n",
    "df.createOrReplaceTempView(\"pm_df\") # 注册为临时表\n",
    "spark.sql(\"SELECT year, month, PM_Dongsi FROM pm_df LIMIT 10\").show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----+-----+--------+\n",
      "|year|month|count(1)|\n",
      "+----+-----+--------+\n",
      "|2015|    2|     666|\n",
      "|2014|    4|     698|\n",
      "|2015|   12|     732|\n",
      "|2013|    2|     641|\n",
      "|2014|   10|     735|\n",
      "|2014|   12|     712|\n",
      "|2013|    9|     694|\n",
      "|2013|   10|     682|\n",
      "|2014|    5|     710|\n",
      "|2013|   12|     727|\n",
      "|2014|    1|     723|\n",
      "|2013|    3|     722|\n",
      "|2014|    8|     733|\n",
      "|2013|    6|     613|\n",
      "|2015|    4|     709|\n",
      "|2015|    8|     737|\n",
      "|2015|   11|     706|\n",
      "|2015|    9|     710|\n",
      "|2013|    7|     649|\n",
      "|2014|    9|     716|\n",
      "+----+-----+--------+\n",
      "only showing top 20 rows\n",
      "\n"
     ]
    }
   ],
   "source": [
    "spark.sql(\"\"\"\n",
    "SELECT year, month, COUNT(*)\n",
    "FROM pm_df\n",
    "WHERE PM_Dongsi != 'NA'\n",
    "GROUP BY year, month\n",
    "\"\"\").show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Spark 中的 Schema"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.1 数据类型\n",
    "\n",
    "基本数据类型与 `Scala` 的数据类型基本相同\n",
    "\n",
    "| DataType | Value assigned in Scala | API to instantiate |\n",
    "| :------: | :---------------------: | :----------------: |\n",
    "| ByteType | Byte | DataTypes.ByteType |\n",
    "| ShortType | Short | DataTypes.ShortType |\n",
    "| IntegerType | Int | DataTypes.IntegerType |\n",
    "| LongType | Long | DataTypes.LongType |\n",
    "| FloatType | Float | DataTypes.FloatType |\n",
    "| DoubleType | Double | DataTypes.DoubleType |\n",
    "| StringType | String | DataTypes.StringType |\n",
    "| BooleanType | Boolean | DataTypes.BooleanType |\n",
    "| DecimalType | java.math.BigDecimal | DataTypes.DecimalType |\n",
    "\n",
    "复杂数据类型如下\n",
    "\n",
    "| DataType | Value assigned in Scala | API to instantiate |\n",
    "| :------: | :---------------------: | :----------------: |\n",
    "| BinaryType | Array[Byte] | DataTypes.BinaryType |\n",
    "| TimestampType | java.sql.Timestamp | DataTypes.TimestampType |\n",
    "| DateType | java.sql.Date | DataTypes.DateType |\n",
    "| ArrayType | scala.collection.Seq | DataTypes.createArrayType(ElementType) |\n",
    "| MapType | scala.collection.Map | DataTypes.createMapType(keyType, valueType) |\n",
    "| StructType | org.apache.spark.sql.Row | StructType(ArrayType[fieldTypes]) |\n",
    "| StructField | A value corresponding to the type of this field | StructField(name, dataType, nullable?) |\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- No: integer (nullable = true)\n",
      " |-- year: integer (nullable = true)\n",
      " |-- month: integer (nullable = true)\n",
      " |-- day: integer (nullable = true)\n",
      " |-- hour: integer (nullable = true)\n",
      " |-- season: integer (nullable = true)\n",
      " |-- PM_Dongsi: string (nullable = true)\n",
      " |-- PM_Dongsihuan: string (nullable = true)\n",
      " |-- PM_Nongzhanguan: string (nullable = true)\n",
      " |-- PM_US_Post: string (nullable = true)\n",
      " |-- DEWP: string (nullable = true)\n",
      " |-- HUMI: string (nullable = true)\n",
      " |-- PRES: string (nullable = true)\n",
      " |-- TEMP: string (nullable = true)\n",
      " |-- cbwd: string (nullable = true)\n",
      " |-- Iws: string (nullable = true)\n",
      " |-- precipitation: string (nullable = true)\n",
      " |-- Iprec: string (nullable = true)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "df.printSchema()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "一般来说，我们都会自定义DF的Schema，这样做有几个好处\n",
    "\n",
    "- 把 Spark 从繁重的类型推测中解放出来\n",
    "- 如果数据太多，Spark 会花费大量的计算资源去做推断\n",
    "- 如果已经定义好 Schema，那么错误的数据就可以被检测出来\n",
    "通常有两种方式来定义Schema"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "一般有两种方式定义 `Schema`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql.types import *\n",
    "\n",
    "schema = StructType([\n",
    "    StructField(\"Id\", IntegerType(), False),\n",
    "    StructField(\"First\", StringType(), False),\n",
    "    StructField(\"Last\", StringType(), False),\n",
    "    StructField(\"Url\", StringType(), False),\n",
    "    StructField(\"Published\", StringType(), False),\n",
    "    StructField(\"Hits\", IntegerType(), False),\n",
    "    StructField(\"Campaigns\", ArrayType(StringType()), False),\n",
    "])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "df = spark.read.schema(schema).json('blogs.txt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "|Id |First    |Last   |Url              |Published|Hits |Campaigns                   |\n",
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "|1  |Jules    |Damji  |https://tinyurl.1|1/4/2016 |4535 |[twitter, LinkedIn]         |\n",
      "|2  |Brooke   |Wenig  |https://tinyurl.2|5/5/2018 |8908 |[twitter, LinkedIn]         |\n",
      "|3  |Denny    |Lee    |https://tinyurl.3|6/7/2019 |7659 |[web, twitter, FB, LinkedIn]|\n",
      "|4  |Tathagata|Das    |https://tinyurl.4|5/12/2018|10568|[twitter, FB]               |\n",
      "|5  |Matei    |Zaharia|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB, LinkedIn]|\n",
      "|6  |Reynold  |Xin    |https://tinyurl.6|3/2/2015 |25568|[twitter, LinkedIn]         |\n",
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "df.show(truncate=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 列，行和表达式\n",
    "\n",
    "### 列（Column）概述\n",
    "\n",
    "列（Column）类似于 Pandas 中的列和数据库表中的列，Spark 中列用类型 Column 来表示\n",
    "\n",
    "以下是一些例子"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "spark = SparkSession.builder.appName(\"HelloSpark\").master(\"local\").getOrCreate()\n",
    "schema = StructType([\n",
    "    StructField(\"Id\", IntegerType(), False),\n",
    "    StructField(\"First\", StringType(), False),\n",
    "    StructField(\"Last\", StringType(), False),\n",
    "    StructField(\"Url\", StringType(), False),\n",
    "    StructField(\"Published\", StringType(), False),\n",
    "    StructField(\"Hits\", IntegerType(), False),\n",
    "    StructField(\"Campaigns\", ArrayType(StringType()), False),\n",
    "])\n",
    "blogsDF = spark.read.schema(schema).json(\"blogs.txt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['Id', 'First', 'Last', 'Url', 'Published', 'Hits', 'Campaigns']"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 查看所有列（Python List）\n",
    "blogsDF.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Column<b'Id'>"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 获取名为 `id` 的列，返回 Column 对象\n",
    "blogsDF[\"Id\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-----+----------+\n",
      "| Hits|(Hits * 2)|\n",
      "+-----+----------+\n",
      "| 4535|      9070|\n",
      "| 8908|     17816|\n",
      "| 7659|     15318|\n",
      "|10568|     21136|\n",
      "|40578|     81156|\n",
      "|25568|     51136|\n",
      "+-----+----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 使用 col 值进行计算\n",
    "blogsDF.select([col(\"Hits\"), col(\"Hits\")*2]).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+----------+----------+\n",
      "|(Hits * 2)|(Hits * 3)|\n",
      "+----------+----------+\n",
      "|      9070|     13605|\n",
      "|     17816|     26724|\n",
      "|     15318|     22977|\n",
      "|     21136|     31704|\n",
      "|     81156|    121734|\n",
      "|     51136|     76704|\n",
      "+----------+----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "blogsDF.select([expr(\"Hits * 2\"), col('Hits') * 3]).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "| Id|    First|   Last|              Url|Published| Hits|           Campaigns|Bigger Hits|\n",
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "|  1|    Jules|  Damji|https://tinyurl.1| 1/4/2016| 4535| [twitter, LinkedIn]|      false|\n",
      "|  2|   Brooke|  Wenig|https://tinyurl.2| 5/5/2018| 8908| [twitter, LinkedIn]|      false|\n",
      "|  3|    Denny|    Lee|https://tinyurl.3| 6/7/2019| 7659|[web, twitter, FB...|      false|\n",
      "|  4|Tathagata|    Das|https://tinyurl.4|5/12/2018|10568|       [twitter, FB]|       true|\n",
      "|  5|    Matei|Zaharia|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB...|       true|\n",
      "|  6|  Reynold|    Xin|https://tinyurl.6| 3/2/2015|25568| [twitter, LinkedIn]|       true|\n",
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# withColumn 添加一列\n",
    "blogsDF.withColumn(\"Bigger Hits\", expr(\"Hits > 10000\")).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+-------------+\n",
      "|    AuthorsId|\n",
      "+-------------+\n",
      "|  JulesDamji1|\n",
      "| BrookeWenig2|\n",
      "|    DennyLee3|\n",
      "|TathagataDas4|\n",
      "|MateiZaharia5|\n",
      "|  ReynoldXin6|\n",
      "+-------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 使用 concat 连接表达式\n",
    "blogsDF.withColumn(\"AuthorsId\", concat(expr(\"First\"), expr(\"Last\"), expr(\"Id\")))\\\n",
    "    .select(col(\"AuthorsId\"))\\\n",
    "    .show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "|Id |First    |Last   |Url              |Published|Hits |Campaigns                   |\n",
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "|5  |Matei    |Zaharia|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB, LinkedIn]|\n",
      "|6  |Reynold  |Xin    |https://tinyurl.6|3/2/2015 |25568|[twitter, LinkedIn]         |\n",
      "|4  |Tathagata|Das    |https://tinyurl.4|5/12/2018|10568|[twitter, FB]               |\n",
      "|2  |Brooke   |Wenig  |https://tinyurl.2|5/5/2018 |8908 |[twitter, LinkedIn]         |\n",
      "|3  |Denny    |Lee    |https://tinyurl.3|6/7/2019 |7659 |[web, twitter, FB, LinkedIn]|\n",
      "|1  |Jules    |Damji  |https://tinyurl.1|1/4/2016 |4535 |[twitter, LinkedIn]         |\n",
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 使用 sort 排序，desc 降序，asc 升序\n",
    "blogsDF.sort(col('Hits').desc()).show(truncate=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+------+\n",
      "|Unique|\n",
      "+------+\n",
      "|     1|\n",
      "|     2|\n",
      "|     3|\n",
      "|     4|\n",
      "|     5|\n",
      "|     6|\n",
      "+------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# alias 取别名\n",
    "blogsDF.select(col('Id').alias('Unique')).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Id', 'First', 'Last', 'Url', 'Published', 'Hits', 'Campaigns']\n"
     ]
    }
   ],
   "source": [
    "blog_df = blogsDF\n",
    "print(blog_df.columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Column<b'Id'>\n",
      "+---+---------+\n",
      "| Id|    First|\n",
      "+---+---------+\n",
      "|  1|    Jules|\n",
      "|  2|   Brooke|\n",
      "|  3|    Denny|\n",
      "|  4|Tathagata|\n",
      "|  5|    Matei|\n",
      "|  6|  Reynold|\n",
      "+---+---------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 2. 通过列名获取列的对象\n",
    "print(col('Id'))\n",
    "blog_df.select(col('Id'), col('First')).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---------+-----+----------+\n",
      "| Id|    First| Hits|(Hits * 2)|\n",
      "+---+---------+-----+----------+\n",
      "|  1|    Jules| 4535|      9070|\n",
      "|  2|   Brooke| 8908|     17816|\n",
      "|  3|    Denny| 7659|     15318|\n",
      "|  4|Tathagata|10568|     21136|\n",
      "|  5|    Matei|40578|     81156|\n",
      "|  6|  Reynold|25568|     51136|\n",
      "+---+---------+-----+----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 3. col进行运算\n",
    "blog_df.select(col('Id'), col('First'), col('Hits'), col('Hits') * 2).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---------+\n",
      "| Id|    First|\n",
      "+---+---------+\n",
      "|  1|    Jules|\n",
      "|  2|   Brooke|\n",
      "|  3|    Denny|\n",
      "|  4|Tathagata|\n",
      "|  5|    Matei|\n",
      "|  6|  Reynold|\n",
      "+---+---------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 4. 简单的写法\n",
    "blog_df.select('Id', 'First').show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+----------+\n",
      "| Id|(Hits * 2)|\n",
      "+---+----------+\n",
      "|  1|      9070|\n",
      "|  2|     17816|\n",
      "|  3|     15318|\n",
      "|  4|     21136|\n",
      "|  5|     81156|\n",
      "|  6|     51136|\n",
      "+---+----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 5. expr表达式\n",
    "blog_df.select(col('Id'), expr(\"Hits * 2\")).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "| Id|    First|   Last|              Url|Published| Hits|           Campaigns|Bigger Hits|\n",
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "|  1|    Jules|  Damji|https://tinyurl.1| 1/4/2016| 4535| [twitter, LinkedIn]|      false|\n",
      "|  2|   Brooke|  Wenig|https://tinyurl.2| 5/5/2018| 8908| [twitter, LinkedIn]|      false|\n",
      "|  3|    Denny|    Lee|https://tinyurl.3| 6/7/2019| 7659|[web, twitter, FB...|      false|\n",
      "|  4|Tathagata|    Das|https://tinyurl.4|5/12/2018|10568|       [twitter, FB]|       true|\n",
      "|  5|    Matei|Zaharia|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB...|       true|\n",
      "|  6|  Reynold|    Xin|https://tinyurl.6| 3/2/2015|25568| [twitter, LinkedIn]|       true|\n",
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "\n",
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "| Id|    First|   Last|              Url|Published| Hits|           Campaigns|Bigger Hits|\n",
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "|  1|    Jules|  Damji|https://tinyurl.1| 1/4/2016| 4535| [twitter, LinkedIn]|      false|\n",
      "|  2|   Brooke|  Wenig|https://tinyurl.2| 5/5/2018| 8908| [twitter, LinkedIn]|      false|\n",
      "|  3|    Denny|    Lee|https://tinyurl.3| 6/7/2019| 7659|[web, twitter, FB...|      false|\n",
      "|  4|Tathagata|    Das|https://tinyurl.4|5/12/2018|10568|       [twitter, FB]|       true|\n",
      "|  5|    Matei|Zaharia|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB...|       true|\n",
      "|  6|  Reynold|    Xin|https://tinyurl.6| 3/2/2015|25568| [twitter, LinkedIn]|       true|\n",
      "+---+---------+-------+-----------------+---------+-----+--------------------+-----------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 6. 添加一列, withColumn\n",
    "blog_df.withColumn('Bigger Hits', col('Hits') > 10000).show()\n",
    "blog_df.withColumn('Bigger Hits', expr(\"Hits > 10000\")).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+-----------------+---------+-----+--------------------+------------+\n",
      "| Id|              Url|Published| Hits|           Campaigns|        Name|\n",
      "+---+-----------------+---------+-----+--------------------+------------+\n",
      "|  1|https://tinyurl.1| 1/4/2016| 4535| [twitter, LinkedIn]|  JulesDamji|\n",
      "|  2|https://tinyurl.2| 5/5/2018| 8908| [twitter, LinkedIn]| BrookeWenig|\n",
      "|  3|https://tinyurl.3| 6/7/2019| 7659|[web, twitter, FB...|    DennyLee|\n",
      "|  4|https://tinyurl.4|5/12/2018|10568|       [twitter, FB]|TathagataDas|\n",
      "|  5|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB...|MateiZaharia|\n",
      "|  6|https://tinyurl.6| 3/2/2015|25568| [twitter, LinkedIn]|  ReynoldXin|\n",
      "+---+-----------------+---------+-----+--------------------+------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 7. concat, drop 删掉列\n",
    "blog_df.withColumn(\"Name\", concat(col('First'), col('Last'))) \\\n",
    "    .drop('First', 'Last').show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+----------+-------+-----------------+---------+-----+--------------------+\n",
      "| Id|First Name|   Last|              Url|Published| Hits|           Campaigns|\n",
      "+---+----------+-------+-----------------+---------+-----+--------------------+\n",
      "|  1|     Jules|  Damji|https://tinyurl.1| 1/4/2016| 4535| [twitter, LinkedIn]|\n",
      "|  2|    Brooke|  Wenig|https://tinyurl.2| 5/5/2018| 8908| [twitter, LinkedIn]|\n",
      "|  3|     Denny|    Lee|https://tinyurl.3| 6/7/2019| 7659|[web, twitter, FB...|\n",
      "|  4| Tathagata|    Das|https://tinyurl.4|5/12/2018|10568|       [twitter, FB]|\n",
      "|  5|     Matei|Zaharia|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB...|\n",
      "|  6|   Reynold|    Xin|https://tinyurl.6| 3/2/2015|25568| [twitter, LinkedIn]|\n",
      "+---+----------+-------+-----------------+---------+-----+--------------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 8. 重命名列\n",
    "blog_df.withColumnRenamed(\"First\", \"First Name\").show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "root\n",
      " |-- Id: string (nullable = true)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 9. 转换列的类型\n",
    "blog_df.select(col('Id').cast(StringType())).printSchema()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+------+\n",
      "|Unique|\n",
      "+------+\n",
      "|     1|\n",
      "|     2|\n",
      "|     3|\n",
      "|     4|\n",
      "|     5|\n",
      "|     6|\n",
      "+------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 10. alias取别名\n",
    "blog_df.select(col('Id').alias('Unique')).show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "|Id |First    |Last   |Url              |Published|Hits |Campaigns                   |\n",
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "|5  |Matei    |Zaharia|https://tinyurl.5|5/14/2014|40578|[web, twitter, FB, LinkedIn]|\n",
      "|6  |Reynold  |Xin    |https://tinyurl.6|3/2/2015 |25568|[twitter, LinkedIn]         |\n",
      "|4  |Tathagata|Das    |https://tinyurl.4|5/12/2018|10568|[twitter, FB]               |\n",
      "|2  |Brooke   |Wenig  |https://tinyurl.2|5/5/2018 |8908 |[twitter, LinkedIn]         |\n",
      "|3  |Denny    |Lee    |https://tinyurl.3|6/7/2019 |7659 |[web, twitter, FB, LinkedIn]|\n",
      "|1  |Jules    |Damji  |https://tinyurl.1|1/4/2016 |4535 |[twitter, LinkedIn]         |\n",
      "+---+---------+-------+-----------------+---------+-----+----------------------------+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 11. sort 排序\n",
    "df.sort(col('Hits').desc()).show(truncate=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.13 ('pyspark_env')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.13"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "e3d6b965825ec263f57a3c14b14d6a82462b0f70a0e4ad31b1f27104ad7a4eac"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
