package com.sinopec.pi.sparkguide

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount {
    /**
     * Entry point: counts word frequencies in a bundled English text file
     * using Spark RDDs and prints the five most frequent words.
     *
     * Runs against a local master with two cores; other deploy modes work
     * if `setMaster` is overridden via spark-submit.
     */
    def main(args: Array[String]): Unit = {
        // App name fixed: it previously said "Spark Pi" (copy-paste leftover
        // from the SparkPi example) which mislabels this job in the web UI.
        val conf = new SparkConf().setAppName("WordCount").setMaster("local[2]")
        val sc = new SparkContext(conf)

        try {
            // Resolve the input file from the classpath. getResource returns
            // null when the resource is absent, so fail fast with a clear
            // message instead of an opaque NullPointerException on .toURI.
            val resourceUrl = Option(WordCount.getClass.getClassLoader.getResource("nsdi_spark.txt"))
                .getOrElse(throw new IllegalStateException(
                    "resource nsdi_spark.txt not found on classpath"))
            val filePath = resourceUrl.toURI.getPath
            // textFile accepts both hdfs:// and file:// URIs.
            val fileRDD: RDD[String] = sc.textFile(filePath)

            // Common English stop words (plus a few punctuation tokens and the
            // empty string produced by splitting on consecutive spaces).
            val stopWords = Set("the", "", "a", "an", ",", "we", "this", "to", "!", "for", "in", "our",
                "with", "that", "of", "and", "is", "are", "as", "on", "not", "can", "by", "be", "it", "each", "from", "these")

            // Split each line on spaces, normalize case, drop stop words,
            // then count occurrences per word.
            val wcRdd: RDD[(String, Int)] = fileRDD.flatMap(line => line.split(" "))
                .map(x => x.toLowerCase)             // case-insensitive counting
                .filter(w => !stopWords.contains(w)) // remove common stop words
                .map(word => (word, 1))
                // reduceByKey comes from PairRDDFunctions via the implicit
                // conversion provided by `object RDD`, not from RDD itself.
                .reduceByKey((a, b) => a + b)

            // collect() pulls the whole result to the driver — fine for this
            // small demo, but can OOM the driver on large datasets.
            val wordCount: Array[(String, Int)] = wcRdd.collect()
            println(f"word size is: ${wordCount.length}")

            // Print the top-5 words by descending count.
            val sorted = wordCount.sortBy(kv => -kv._2)
            sorted.take(5).foreach(println)

            // Keep the driver alive so the Spark web UI can be inspected.
            Thread.sleep(1000000)
        } finally {
            // Release the SparkContext even if the job fails; previously the
            // context was never stopped.
            sc.stop()
        }
    }
}

