package com.sinopec.pi.sparkguide

import org.apache.spark.sql.functions.{explode, lower}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession, functions}

object WordCountDataSet {
    /**
     * Word-count example over a bundled text file using the Dataset/DataFrame API.
     *
     * Reads `nsdi_spark.txt` from the classpath, splits lines on single spaces,
     * lower-cases the tokens, drops stop words, and prints the 5 most frequent words.
     */
    def main(args: Array[String]): Unit = {
        // Local SparkSession with 2 worker threads — example/demo configuration.
        val spark = SparkSession
            .builder()
            .appName("Spark Dataset Word Count")
            .master("local[2]")
            .getOrCreate()
        // This import is needed to use the $-notation
        import spark.implicits._

        // getResource returns null when the file is absent; fail fast with a clear
        // message instead of an opaque NullPointerException on .toURI.
        val resourceUrl = Option(WordCountDataSet.getClass.getClassLoader.getResource("nsdi_spark.txt"))
            .getOrElse(throw new IllegalStateException("Resource 'nsdi_spark.txt' not found on classpath"))
        val filePath = resourceUrl.toURI.getPath

        // textFile also accepts hdfs:// and file:// URIs.
        val ds: Dataset[String] = spark.read.textFile(filePath)

        // Stop words to discard; "" filters out empty tokens produced by
        // consecutive spaces, and punctuation tokens are listed explicitly.
        val stopWords = Seq("the", "", "a", "an", ",", "we", "this", "to", "!", "for", "in", "our",
            "with", "that", "of", "and", "is", "are", "as", "on", "not", "can", "by", "be", "it", "each", "from", "these")

        val wcDF: DataFrame = ds
            .select(explode(functions.split($"value", " "))
                .alias("word"))
            .select(lower($"word").alias("word")) // normalize case before filtering and grouping
            .filter(!$"word".isin(stopWords: _*))
            .groupBy("word")
            .count() // produces a "count" column
            .orderBy($"count".desc)

        // show(n) only fetches the first rows to the driver — unlike collect(),
        // which would materialize the full result and can OOM the driver on large data.
        wcDF.show(5)

        // Keep the JVM alive so the Spark web UI (http://localhost:4040) can be inspected.
        Thread.sleep(1000000)

        // Release the session and its resources before exiting.
        spark.stop()
    }
}
