package com.lw.scalaspark.core.transformations

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates replacing `groupByKey` with `aggregateByKey`.
 *
 * `aggregateByKey` performs map-side combining (via `seqOp`) before shuffling,
 * so it moves far less data across the network than `groupByKey`, which ships
 * every value to the reducer. Both results are printed for comparison.
 */
object Transformations_AggregateByKey_ReplaceGrapByKey {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("aggregateByKey").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      // Sample (key, value) pairs where each value is itself a pair.
      // (A simpler flat variant would be: List((1,3),(1,2),(1,4),(2,3)).)
      val data = List((1, (2, 3)), (1, (2, 2)), (1, (4, 1)), (2, (3, 1)))
      val rdd = sc.parallelize(data)

      // seqOp: within one partition, fold each value into the accumulator list.
      // The accumulator's type is that of zeroValue (List[(Int, Int)]).
      def seqOp(a: List[(Int, Int)], b: (Int, Int)): List[(Int, Int)] =
        b :: a

      // combOp: merge the per-partition accumulator lists for the same key.
      def combOp(a: List[(Int, Int)], b: List[(Int, Int)]): List[(Int, Int)] =
        b ::: a

      val aggregateByKeyRDD = rdd.aggregateByKey(List.empty[(Int, Int)])(seqOp, combOp)

      // Keep only the first accumulated value per key, wrapped in an Iterator.
      // (aggregateByKey never emits a key with an empty accumulator here,
      // since every key in `data` contributes at least one value.)
      val aggregateByKeyRDD2 = aggregateByKeyRDD.map { case (k, vs) =>
        (k, Iterator(vs.head))
      }

      // BUG FIX: `take(1)` returns an Array, whose toString prints a JVM
      // reference such as "[Lscala.Tuple2;@1b2c3d" — use mkString to render
      // the actual elements.
      println(aggregateByKeyRDD2.take(1).mkString(", "))

      val groupByKeyRDD = rdd.groupByKey()
      groupByKeyRDD.foreach(println)
    } finally {
      // BUG FIX: the SparkContext was never stopped; release it even if the
      // job above throws.
      sc.stop()
    }
  }

}
