package com.shujia.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo9ReduceByKey {

  /**
    * Demonstrates `reduceByKey`: counts students per class from a CSV file.
    *
    * `reduceByKey` aggregates the values for each key and performs map-side
    * pre-aggregation (a combiner) before the shuffle, which reduces the
    * amount of data transferred over the network. Prefer `reduceByKey`
    * over `groupByKey` whenever the aggregation allows it.
    */
  def main(args: Array[String]): Unit = {

    // Fixed app name: the original said "map" (copy-paste leftover from an
    // earlier demo); this demo is about reduceByKey.
    val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("reduceByKey")
    val sc: SparkContext = new SparkContext(conf)

    try {
      // One line per student record.
      val lines: RDD[String] = sc.textFile("spark/data/students.txt")

      // Map each record to (class, 1) so occurrences can be summed per class.
      // NOTE(review): assumes every line has at least 5 comma-separated
      // fields — a malformed line would throw ArrayIndexOutOfBoundsException.
      val classOnes: RDD[(String, Int)] = lines.map(line => {
        val clazz: String = line.split(",")(4)
        (clazz, 1)
      })

      /*
       * reduceByKey — aggregates values by key:
       *   1. pre-aggregates on the map side (combiner),
       *   2. performs the final aggregation on the reduce side,
       * thereby shrinking the data volume shuffled across the network.
       */
      classOnes
        .reduceByKey(_ + _)
        .foreach(println) // local[4] mode: output appears on the driver console

    } finally {
      // Bug fix: the original never stopped the SparkContext, leaking the
      // application's resources; stop it even if the job throws.
      sc.stop()
    }
  }
}
