package com.shujia.opt

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Word-count demo showing `aggregateByKey`, which — unlike `reduceByKey` —
 * lets the map-side (per-partition combine) and reduce-side (cross-partition
 * merge) aggregation functions be supplied separately.
 */
object Demo1aggregateByKey {
  def main(args: Array[String]): Unit = {
    // 1. Build the Spark context (local mode).
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("wc")
    val sc = new SparkContext(conf)

    try {
      // 2. Read the input data.
      // RDD: resilient distributed dataset.
      val lines: RDD[String] = sc.textFile("spark/data/words")

      // 3. Split each comma-separated line into individual words.
      val words: RDD[String] = lines.flatMap(line => line.split(","))

      // 4. Convert to key-value pairs: (word, 1).
      val kvs: RDD[(String, Int)] = words.map(word => (word, 1))

      // Count occurrences per word. zeroValue = 0 is the per-partition
      // accumulator seed.
      val countRDD: RDD[(String, Int)] = kvs
        .aggregateByKey(0)(
          (u, i) => u + i,    // map-side: fold each value into the partition-local accumulator
          (u1, u2) => u1 + u2 // reduce-side: merge accumulators across partitions
        )

      countRDD.foreach(println)
    } finally {
      // Always stop the SparkContext so executor resources are released,
      // even if the job above throws.
      sc.stop()
    }
  }

}
