package com.dxf.bigdata.D05_spark_again

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates that the *ByKey operators — reduceByKey, foldByKey,
 * aggregateByKey and combineByKey — all delegate to the same underlying
 * primitive, `combineByKeyWithClassTag`.
 *
 * reduceByKey applies the SAME function both within a partition and when
 * merging across partitions; the other operators allow the intra-partition
 * and inter-partition logic to differ.
 */
object ByKey说明 {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("app")

    val sc = new SparkContext(sparkConf)

    // Fix: wrap the demo in try/finally so the SparkContext is always
    // stopped — the original leaked the local Spark runtime on every run.
    try {
      val rdd: RDD[(String, Int)] =
        sc.makeRDD(List(("a", 1), ("a", 2), ("b", 3), ("b", 4), ("b", 5), ("b", 6)))

      // reduceByKey:
      //   combineByKeyWithClassTag[V]((v: V) => v, func, func, partitioner)
      // -> identical merge function inside and across partitions.
      val redRDD: RDD[(String, Int)] = rdd.reduceByKey(_ + _)

      // foldByKey:
      //   combineByKeyWithClassTag[V]((v: V) => cleanedFunc(createZero(), v), cleanedFunc, cleanedFunc, partitioner)
      // -> like reduceByKey, but seeded with a zero value per key per partition.
      val foldRdd: RDD[(String, Int)] = rdd.foldByKey(0)(_ + _)

      // aggregateByKey:
      //   combineByKeyWithClassTag[U]((v: V) => cleanedSeqOp(createZero(), v), cleanedSeqOp, combOp, partitioner)
      // -> separate intra-partition (seqOp) and inter-partition (combOp) functions.
      val aggRDD: RDD[(String, Int)] = rdd.aggregateByKey(0)(_ + _, _ + _)

      // combineByKey:
      //   combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners, partitioner, mapSideCombine, serializer)(null)
      // -> fully general form; the caller supplies all three functions.
      //    Explicit parameter types added for readability (combineByKey cannot
      //    infer them for createCombiner/mergeValue).
      val combRDD: RDD[(String, Int)] = rdd.combineByKey(
        (a: Int) => a,
        (a: Int, v: Int) => a + v,
        (v1: Int, v2: Int) => v1 + v2
      )

      // Takeaway: every *ByKey operator above bottoms out in
      // combineByKeyWithClassTag — i.e. they are all specializations of
      // combineByKey.
    } finally {
      sc.stop()
    }
  }

}
