package com.mjf.spark.day04

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Transformation operator: combineByKey
 */
object Spark06_Transformation_combineByKey {

  /**
   * Demonstrates three ways to compute each student's average score from
   * (name, score) pairs, keeping only the combineByKey variant active.
   */
  def main(args: Array[String]): Unit = {

    // Create the SparkConf configuration
    val conf = new SparkConf().setMaster("local[*]").setAppName("Spark06_Transformation_combineByKey")
    // Create the SparkContext
    val sc = new SparkContext(conf)

    try {
      // Requirement: compute each student's average score
      val scoreRDD: RDD[(String, Int)] = sc.makeRDD(List(
        ("lucy", 90), ("jack", 60), ("lucy", 96), ("jack", 62), ("lucy", 100), ("jack", 50)))

      /*
      // Option 1: groupByKey
      // e.g. (lucy,CompactBuffer(90, 96, 100))
      val groupRDD: RDD[(String, Iterable[Int])] = scoreRDD.groupByKey()

      // If one group holds a lot of data, all of it lands on a single task (data skew)
      val resRDD: RDD[(String, Int)] = groupRDD.map {
        case (name, scoreSeq) =>
          (name, scoreSeq.sum / scoreSeq.size)
      }
      */

      /*
      // Option 2: reduceByKey
      // Reshape each record to (name, (score, 1)) so sums and counts can be reduced together
      val mapRDD: RDD[(String, (Int, Int))] = scoreRDD.map {
        case (name, score) =>
          (name, (score, 1))
      }

      // Aggregate (sum, count) per student with reduceByKey (combines map-side, unlike groupByKey)
      val reduceRDD: RDD[(String, (Int, Int))] = mapRDD.reduceByKey(
        (t1, t2) =>
          (t1._1 + t2._1, t1._2 + t2._2)
      )

      val resRDD: RDD[(String, Int)] = reduceRDD.map {
        case (name, (score, count)) =>
          (name, score / count)
      }
      */

      // Option 3: combineByKey
      /**
       * createCombiner: V => C        builds the initial combiner from the first value seen for a key
       * mergeValue: (C, V) => C       within-partition rule: folds each further value into the combiner
       * mergeCombiners: (C, C) => C   cross-partition rule: merges two combiners for the same key
       */
      val combineRDD: RDD[(String, (Int, Int))] = scoreRDD.combineByKey(
        (score: Int) => (score, 1),
        (acc: (Int, Int), score: Int) => (acc._1 + score, acc._2 + 1),
        (a1: (Int, Int), a2: (Int, Int)) => (a1._1 + a2._1, a1._2 + a2._2)
      )

      // BUG FIX: the original used integer division (score / count), which truncates
      // the average (e.g. lucy: 286 / 3 = 95 instead of 95.33...). Divide as Double.
      val resRDD: RDD[(String, Double)] = combineRDD.map {
        case (name, (sum, count)) => (name, sum.toDouble / count)
      }

      resRDD.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even if the job above throws
      sc.stop()
    }
  }
}

