package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo9ReduceByKey {

  /**
   * Demo: counting students per class with reduceByKey, and contrasting it
   * with the equivalent (but less efficient) groupByKey approach.
   *
   * Expects data/students.txt with comma-separated rows of the form:
   *   id,name,age,gender,clazz
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName("reduceByKey")
    conf.setMaster("local")

    val sc = new SparkContext(conf)
    val studentsRDD: RDD[String] = sc.textFile("data/students.txt")

    val splitRDD: RDD[Array[String]] = studentsRDD.map(student => student.split(","))

    // Extract (clazz, 1) pairs for counting.
    // RDD.collect with a partial function keeps only rows that split into
    // exactly 5 fields, so a malformed line no longer throws a MatchError.
    val clazzRDD: RDD[(String, Int)] = splitRDD.collect {
      case Array(_, _, _, _, clazz) => (clazz, 1)
    }

    /**
     * reduceByKey: aggregates the values of each key with the given
     * associative function. It triggers a shuffle, but performs map-side
     * pre-aggregation first, so far less data crosses the network.
     */
    val countRDD: RDD[(String, Int)] = clazzRDD.reduceByKey(_ + _)

    countRDD.foreach(println)

    /**
     * groupByKey ships every (key, value) pair through the shuffle before
     * aggregating, so prefer reduceByKey when a combine function exists.
     * groupByKey is still needed when you must see all values of a key at
     * once (e.g. computing a variance), which reduceByKey cannot express.
     */
    clazzRDD
      .groupByKey()
      .map(kv => (kv._1, kv._2.sum))
      .foreach(println)

    // Keep the driver (and the Spark Web UI at localhost:4040) alive for
    // inspection. Sleep instead of busy-spinning so we don't peg a core.
    while (true) {
      Thread.sleep(1000)
    }

  }

}
