package cn.aijson.demo.rdd

import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDDAggregateWithKey {
  /**
   * Word-count demo comparing four equivalent by-key aggregation strategies on an RDD
   * of (word, 1) pairs: reduceByKey, groupByKey + mapValues, foldByKey, aggregateByKey.
   * Reads `data/input/words.txt` and prints each result to stdout.
   */
  def main(args: Array[String]): Unit = {
    // Create a local Spark environment (all available cores).
    val conf: SparkConf = new SparkConf().setAppName("spark").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    sc.setLogLevel("WARN")

    try {
      // Build (word, 1) pairs, skipping blank lines.
      // isNotBlank is the single-argument API; isNoneBlank is the varargs variant
      // and was misused here with one argument.
      val lines: RDD[String] = sc.textFile("data/input/words.txt")
      val wordAndOneRDD: RDD[(String, Int)] = lines
        .filter(StringUtils.isNotBlank(_))
        .flatMap(_.split(" "))
        .map((_, 1))

      // 1) reduceByKey: map-side combine, then merge per key.
      println("################reduceByKey")
      wordAndOneRDD.reduceByKey(_ + _).foreach(println)

      // 2) groupByKey then mapValues: shuffles all values, then sums per key.
      println("################先分组再聚和mapValues")
      val groupedValue: RDD[(String, Iterable[Int])] = wordAndOneRDD.groupByKey()
      groupedValue.mapValues(_.sum).foreach(println)

      // 3) foldByKey: like reduceByKey but with an explicit zero value.
      // Label previously said "mapValues", which did not match the operation shown.
      println("################foldByKey")
      wordAndOneRDD.foldByKey(0)(_ + _).foreach(println)

      // 4) aggregateByKey(initial)(seqOp for within-partition, combOp across partitions).
      println("################aggregateByKey(初始值)(局部聚合, 全局聚合)")
      wordAndOneRDD.aggregateByKey(0)(_ + _, _ + _).foreach(println)
    } finally {
      // Release the SparkContext even if a job fails.
      sc.stop()
    }
  }
}
