package com.shujia.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
  * Demo: word count using reduceByKey.
  *
  * reduceByKey aggregates values per key and performs map-side
  * pre-aggregation (a "combiner"), which reduces shuffle traffic
  * compared to groupByKey. It is suited to simple associative,
  * commutative reductions such as summing counts.
  */
object Demo9ReduceBykey {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      .setAppName("reduceByKey") // app name matches what this demo shows

    val sc = new SparkContext(conf)

    // Read the input file; each element is one line of text.
    val linesRDD: RDD[String] = sc.textFile("data/words.txt")

    // Split each comma-separated line into individual words.
    val wordsRDD: RDD[String] = linesRDD.flatMap(line => line.split(","))

    // Convert the RDD to key-value format: (word, 1).
    val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

    // Count occurrences of each word by summing the 1s per key.
    // Map-side pre-aggregation happens automatically before the shuffle.
    val countRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)

    countRDD.foreach(println)

    // Release Spark resources before the JVM exits.
    sc.stop()
  }

}
