package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo: word count with `reduceByKey`.
  *
  * Reads comma-separated words from data/words.txt, maps each word to
  * (word, 1), and aggregates the counts per key — once with an explicit
  * lambda and once with the underscore shorthand.
  */
object Demo08ReduceByKey {
  def main(args: Array[String]): Unit = {
    // Configure the application: run locally with a single worker thread.
    val conf: SparkConf = new SparkConf()
      .setAppName("reduceByKey")
      .setMaster("local")

    // Create the Spark context — the entry point for building RDDs.
    val sc: SparkContext = new SparkContext(conf)

    try {
      // Read the input file; each RDD element is one line of text.
      val linesRDD: RDD[String] = sc.textFile("data/words.txt")

      // Split each comma-separated line into individual words.
      val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

      // Convert the RDD to key-value form: (word, 1) for counting.
      val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

      /**
        * reduceByKey: aggregates all values that share the same key.
        */
      val countRDD: RDD[(String, Int)] = kvRDD.reduceByKey((i: Int, j: Int) => i + j)
      countRDD.foreach(println)

      // If each lambda parameter is used exactly once, underscores can
      // stand in for them — this is the idiomatic short form.
      val count: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)
      count.foreach(println)
    } finally {
      // Release the context's resources; the original omitted this,
      // leaving the SparkContext open when main returned.
      sc.stop()
    }
  }
}
