package com.atguigu.sparkcore.rdd.kvs

import com.atguigu.sparkcore.rdd.kvs.MyPartitions.MyPartition
import com.atguigu.sparkcore.util.MySparkContextUtil
import org.apache.spark.{SparkConf, SparkContext}

/**
  * 通过key进行聚合
  * author 剧情再美终是戏
  * mail 13286520398@163.com
  * date 2020/1/7 6:29 
  * version 1.0
  * <p>des</p>
  * <p>1、reduceByKey在分区间是会有预聚合操作的（map端预聚合），聚合的操作和reduce阶段一样</p>
  * <p>2、源码：combineByKeyWithClassTag[V]((v: V) => v, func, func, partitioner)</p>
  **/
object ReducerByKey {

  /**
    * Demonstrates per-key aggregation with `reduceByKey`.
    *
    * `reduceByKey` performs a map-side combine inside each partition
    * before shuffling, using the same function for both the partition-level
    * pre-aggregation and the final reduce.
    */
  def main(args: Array[String]): Unit = {

    // Obtain a SparkContext from the project helper (configured via args).
    val sc = MySparkContextUtil.get(args)

    // Sample (gender, count) pairs, distributed across 2 partitions.
    val pairs = List(("female", 1), ("male", 5), ("female", 5), ("male", 2))
    val pairRdd = sc.makeRDD(pairs, 2)

    // Sum the values for each key; pre-aggregation happens within each
    // partition before the shuffle, reducing network traffic.
    val summedByKey = pairRdd.reduceByKey((acc, value) => acc + value)

    // Collect to the driver and print, e.g. (female,6),(male,7).
    println(summedByKey.collect().mkString(","))

    // Release the SparkContext and any associated resources.
    MySparkContextUtil.close(sc)
  }

}
