package com.spark.core.transformation

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates `reduceByKey`.
 *
 * `reduceByKey` first groups records by key, then aggregates the values
 * within each group using the supplied binary function. It only operates
 * on RDDs of (K, V) pairs, and performs map-side partial aggregation
 * before the shuffle (unlike `groupByKey`).
 */
object Demo5_reduceByKey {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("reduceByKey")
    val sc = new SparkContext(conf)
    // Valid log levels are documented in uppercase (ALL, DEBUG, ERROR, ...).
    sc.setLogLevel("ERROR")

    try {
      // A small (String, Int) pair RDD — reduceByKey requires K,V-format data.
      // Use one consistent tuple literal style throughout the list.
      val infos: RDD[(String, Int)] = sc.parallelize(List[(String, Int)](
        ("zhangsan", 1),
        ("zhangsan", 2),
        ("zhangsan", 3),
        ("lisi", 100),
        ("lisi", 200)))

      // Sum all values that share the same key.
      val result: RDD[(String, Int)] = infos.reduceByKey(_ + _)

      // Print each (key, summed value) pair.
      result.foreach(println)
    } finally {
      // Release the SparkContext's resources; the original leaked it.
      sc.stop()
    }
  }
}
