package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo11GroupBy {
  def main(args: Array[String]): Unit = {
    /**
     * groupBy: a transformation operator used for grouping.
     * It can be followed by a map operator to perform aggregation.
     *
     * This demo counts students per class four equivalent ways and compares
     * the shuffle data volume of each, from least to most optimized.
     */

    val conf = new SparkConf()
    conf.setMaster("local")
    // Derive the app name from the object name; strip the trailing '$'
    // Scala appends to singleton object class names.
    conf.setAppName(this.getClass.getSimpleName.replace("$", ""))
    val sc = new SparkContext(conf)

    val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")

    // Count students per class (class name is at column index 4).
    // Baseline: groups the FULL lines, so every whole record crosses the
    // shuffle. Observed shuffle size: ~23 KB.
    stuRDD
      .groupBy(s => s.split(",")(4))
      .map(kv => s"${kv._1},${kv._2.size}")
      .foreach(println)

    // Optimization 1: project each line down to just the class name BEFORE
    // grouping, so only the key crosses the shuffle.
    // Observed shuffle size: ~6 KB.
    stuRDD
      .map(line => line.split(",")(4))
      .groupBy(c => c)
      .map(kv => s"${kv._1},${kv._2.size}")
      .foreach(println)

    // Optimization 2: emit (class, 1) pairs and use groupByKey, which avoids
    // groupBy's extra per-record wrapping.
    // Observed shuffle size: ~4 KB.
    stuRDD
      .map(line => (line.split(",")(4), 1))
      .groupByKey()
      .map(kv => s"${kv._1},${kv._2.size}")
      .foreach(println)

    // Optimization 3: reduceByKey pre-aggregates on the map side, so only one
    // (class, partialCount) pair per key per partition crosses the network.
    // Observed shuffle size: ~155 B.
    stuRDD
      .map(line => (line.split(",")(4), 1))
      .reduceByKey(_ + _)
      .foreach(println)

    // Keep the driver alive so the Spark web UI (http://localhost:4040)
    // remains accessible for inspecting the shuffle metrics above.
    // Fix: the original empty `while (true) {}` busy-spun a CPU core at
    // 100%; sleeping keeps the JVM alive without burning CPU.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
