package cn.hnu.spark

import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RddDemo04 {
  /**
   * Word-count demo comparing four RDD aggregation strategies:
   * groupByKey + mapValues, reduceByKey, foldByKey, and aggregateByKey.
   * Reads "data/words.txt", counts words, prints each result set.
   */
  def main(args: Array[String]): Unit = {
    // Build the Spark context (local mode, 2 worker threads).
    val conf: SparkConf = new SparkConf().setAppName("Rdd-demo").setMaster("local[2]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // Read input and drop blank lines.
    // isNotBlank is the single-argument check; the original used the varargs
    // isNoneBlank, which happens to behave the same for one argument but
    // obscures the intent.
    val lines: RDD[String] = sc.textFile("data/words.txt")
    val newLines: RDD[String] = lines.filter(StringUtils.isNotBlank(_))
    // Split on runs of whitespace and pair each word with an initial count of 1.
    val words: RDD[(String, Int)] = newLines.flatMap(_.split("\\s+")).map((_, 1))

    // Approach 1: group first, then aggregate.
    // e.g. ("hadoop", Iterable(1, 1, 1, 1)) -> ("hadoop", 4)
    // NOTE: groupByKey shuffles every value across the network; the variants
    // below combine locally before the shuffle and should be preferred for
    // aggregations.
    val rdd1: RDD[(String, Iterable[Int])] = words.groupByKey()
    val result1: RDD[(String, Int)] = rdd1.mapValues(_.sum)

    // Approach 2-4: combine-before-shuffle aggregations (all equivalent here).
    val result2: RDD[(String, Int)] = words.reduceByKey(_ + _)
    val result3 = words.foldByKey(0)(_ + _)
    // aggregateByKey(zeroValue)(seqOp = within-partition combine,
    //                           combOp = cross-partition merge)
    val result4: RDD[(String, Int)] = words.aggregateByKey(0)(_ + _, _ + _)

    // foreach runs on the executors; with a local master the output appears
    // in this console.
    result1.foreach(println)
    result2.foreach(println)
    result3.foreach(println)
    result4.foreach(println)

    // Release the context's resources — the original never stopped it.
    sc.stop()
  }

}
