package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo6GroupByKey {

  /**
    * Demo: compare `groupByKey` and `groupBy` on a word-count style dataset.
    *
    * Reads lines from data/words.txt, splits each line on commas into words,
    * maps each word to (word, 1), then groups by key two different ways and
    * prints the results to stdout.
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setAppName("groupByKey")
      .setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    try {
      val linesRDD: RDD[String] = sc.textFile("data/words.txt")

      val words: RDD[String] = linesRDD.flatMap(_.split(","))

      val kvRDD: RDD[(String, Int)] = words.map(word => (word, 1))

      /**
        * groupByKey: groups by key, collecting the values into an iterator.
        * groupBy: groups by an arbitrary key selector function.
        *
        * Both trigger a shuffle.
        */
      val groupByKeyRDD: RDD[(String, Iterable[Int])] = kvRDD.groupByKey()

      // Word count via the grouped values; `list.size` counts the 1s per word.
      val resultRDD: RDD[String] = groupByKeyRDD.map {
        case (word: String, list: Iterable[Int]) =>
          word + "\t" + list.size
      }

      resultRDD.foreach(println)

      // groupBy keeps the full (key, value) pairs in the grouped iterable,
      // unlike groupByKey which keeps only the values.
      val groupByRDD: RDD[(String, Iterable[(String, Int)])] = kvRDD.groupBy(kv => kv._1)
      groupByRDD.foreach(println)
    } finally {
      // Fix: the original never stopped the SparkContext; release its
      // resources even if the job throws.
      sc.stop()
    }
  }

}
