import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
  * Demo of Spark's `combineByKey`: computes each student's average score
  * across subjects.
  *
  * @author td
  * @date 2018/3/28
  */
object combineByKeyTestObject {

  /** One subject score for one student. */
  case class ScoreDetail(StudentName: String, Subject: String, score: Float)

  // Sample data: two subject scores per student.
  val scores = List(
    ScoreDetail("xiaoming", "Math", 98),
    ScoreDetail("xiaoming", "English", 88),
    ScoreDetail("wangwu", "Math", 75),
    ScoreDetail("wangwu", "English", 78),
    ScoreDetail("lihua", "Math", 90),
    ScoreDetail("lihua", "English", 80),
    ScoreDetail("zhangsan", "Math", 91),
    ScoreDetail("zhangsan", "English", 80)
  )

  // Key each record by student name so it can be used as a pair RDD.
  val scoresWithKey = scores.map(detail => (detail.StudentName, detail))

  /**
    * Computes the average score per student with `combineByKey` and prints
    * the results to stdout.
    *
    * @param sparkContext an already-started SparkContext; not stopped here —
    *                     the caller owns its lifecycle
    */
  def combineByKeyTest(sparkContext: SparkContext): Unit = {
    val scoresWithKeyRdd =
      sparkContext.parallelize(scoresWithKey).partitionBy(new HashPartitioner(2))

    // Debug output: number of records that landed in each of the 2 partitions.
    scoresWithKeyRdd.foreachPartition(partition => println(partition.length))

    // Accumulator per student is (running sum of scores, count of scores).
    val combineRDD = scoresWithKeyRdd.combineByKey(
      (x: ScoreDetail) => (x.score, 1),                                  // createCombiner
      (acc: (Float, Int), x: ScoreDetail) => (acc._1 + x.score, acc._2 + 1), // mergeValue
      (acc1: (Float, Int), acc2: (Float, Int)) =>                        // mergeCombiners
        (acc1._1 + acc2._1, acc1._2 + acc2._2)
    )

    // Average = sum / count for each student (Float division, count >= 1 by construction).
    val mapRdd = combineRDD.map { case (name, (sum, count)) => (name, sum / count) }
    mapRdd.collect().foreach(println)
  }

  def main(args: Array[String]): Unit = {
    // App name now reflects this job (was copy-pasted "WordCount").
    val conf = new SparkConf().setAppName("CombineByKeyTest").setMaster("local")
    val sc = new SparkContext(conf)
    // Always stop the context so Spark resources are released even on failure.
    try combineByKeyTest(sc)
    finally sc.stop()
  }

}
