package com.shengzai.rdd

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo9GroupByKeyOnGroupBy {

  /**
   * Demo comparing `groupBy` and `groupByKey` on a pair RDD built from the
   * students file.
   *
   * Key differences (and why `groupByKey` is preferred on pair RDDs):
   *  - `groupByKey` groups only the values; the key is not repeated inside
   *    each group's Iterable.
   *  - `groupBy` keeps the whole (key, value) tuple inside each group, so the
   *    key is duplicated in every element.
   *  - `groupByKey` therefore shuffles less data than `groupBy`.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    // Was "Filter" — copy-paste leftover that mislabeled the job in the Spark UI.
    conf.setAppName("Demo9GroupByKeyOnGroupBy")
    val sc = new SparkContext(conf)

    val stuRDD: RDD[String] = sc.textFile("hadoop_code/src/data/students.txt")

    // Build a pair RDD of (second column, last column) from the CSV lines.
    // NOTE(review): assumes every line has at least two comma-separated fields
    // — confirm against students.txt.
    val mapRDD: RDD[(String, String)] = stuRDD.map(line => {
      val split: Array[String] = line.split(",")
      (split(1), split.last)
    })

    // Group on the SAME key that groupByKey uses (the tuple's first element)
    // so the two printed results are directly comparable. The original keyed
    // on `_._2`, which made the comparison below misleading.
    val groupByRDD: RDD[(String, Iterable[(String, String)])] =
      mapRDD.groupBy(_._1)

    // groupByKey: each group's Iterable holds only the values (no key inside),
    // and the shuffle moves less data than groupBy — prefer it on pair RDDs.
    val groupByKeyRDD: RDD[(String, Iterable[String])] = mapRDD.groupByKey()

    groupByRDD.foreach(println)
    groupByKeyRDD.foreach(println)

    // Keep the driver alive so the Spark web UI (default http://localhost:4040)
    // can be inspected. Sleep instead of busy-spinning a CPU core.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
