package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of Spark's groupByKey vs groupBy on a word-count style dataset.
  * Reads comma-separated words from data/words.txt and prints the grouped
  * and counted results.
  */
object Demo8GroupByKey {
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      .setAppName("groupByKey")

    val sc = new SparkContext(conf)

    try {
      val linesRDD: RDD[String] = sc.textFile("data/words.txt")

      // Split each comma-separated line into individual words.
      val wordsRDD: RDD[String] = linesRDD.flatMap(line => line.split(","))

      // Convert the RDD into key-value format: (word, 1).
      val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

      /**
        * groupByKey: groups by key; requires a key-value RDD. All values
        * belonging to the same key are collected into one iterator.
        */
      val groupByKeyRDD: RDD[(String, Iterable[Int])] = kvRDD.groupByKey()

      groupByKeyRDD.foreach(println)

      // Word count: sum the grouped 1s for each word.
      val countRDD: RDD[(String, Int)] = groupByKeyRDD.map {
        case (word: String, ints: Iterable[Int]) =>
          val count: Int = ints.sum
          (word, count)
      }

      countRDD.foreach(println)

      /**
        * groupBy: groups by an arbitrary key function; the resulting values
        * retain the whole original element (key included), so more data is
        * transferred during the shuffle than with groupByKey — slightly
        * worse performance.
        */
      val groupByRDD: RDD[(String, Iterable[(String, Int)])] = kvRDD.groupBy(kv => kv._1)

      groupByRDD.foreach(println)
    } finally {
      // Fix: the SparkContext was never stopped; release its resources so
      // the application shuts down cleanly even if a job fails.
      sc.stop()
    }
  }
}
