package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of `groupByKey` on a key-value RDD: word count implemented by
  * grouping each word's 1s into an iterator and taking its size.
  */
object Demo06GroupByKey {
  def main(args: Array[String]): Unit = {
    // Local-mode context; app name shows up in the Spark UI.
    val conf: SparkConf = new SparkConf().setAppName("GroupByKey").setMaster("local")

    val sc = new SparkContext(conf)

    // Ensure the context is stopped even if the job throws,
    // so the local Spark runtime shuts down cleanly.
    try {
      // Read the words file; one RDD element per line.
      val linesRDD: RDD[String] = sc.textFile("data/words.txt")

      // Split each comma-separated line into individual words.
      val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

      // Convert to key-value format: (word, 1).
      val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

      /**
        * Differences:
        * groupByKey: groups by the key, collecting all values into an iterator
        * groupBy: groups by an arbitrary key-extraction function
        *
        * Similarity: both cause a shuffle.
        *
        * NOTE: for plain counting, `reduceByKey(_ + _)` is preferable — it
        * combines values map-side before the shuffle, moving far less data.
        * `groupByKey` is kept here because this demo illustrates its semantics.
        */
      val groupByKeyRDD: RDD[(String, Iterable[Int])] = kvRDD.groupByKey()
      groupByKeyRDD
        .map { case (word, values) => (word, values.size) }
        .foreach(println)
    } finally {
      sc.stop()
    }
  }
}
