package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo: two ways to aggregate a (word, 1) pair RDD by key.
 *
 * 1. `groupByKey` — groups values per key (kv-RDDs only; triggers a shuffle),
 *    then sums each group's values.
 * 2. `groupBy` — groups whole records by an arbitrary field; each group keeps
 *    the full (word, count) tuples, so here the word count is the group size.
 *
 * NOTE: for a plain word count, `reduceByKey(_ + _)` is preferred in real
 * jobs — it combines map-side and shuffles far less data than either
 * approach shown here. This demo intentionally shows the grouping APIs.
 */
object Demo6GroupByKey {
  def main(args: Array[String]): Unit = {
    // 1. Build the Spark environment.
    val conf = new SparkConf()
    // Execution mode; "local" runs everything in this JVM (no cluster).
    conf.setMaster("local")
    // Application name shown in the Spark UI.
    conf.setAppName("wc")
    val sc = new SparkContext(conf)

    try {
      // 2. Read the input. RDD = resilient distributed dataset.
      val linesRDD: RDD[String] = sc.textFile("spark/data/words")

      // 3. Split each comma-separated line into individual words.
      val wordsRDD: RDD[String] = linesRDD.flatMap(line => line.split(","))

      // 4. Convert to key-value pairs: (word, 1).
      val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

      /**
       * groupByKey: groups by key. Only applicable to kv-format RDDs;
       * causes a shuffle under the hood.
       */
      val groupByKeyRDD: RDD[(String, Iterable[Int])] = kvRDD.groupByKey()

      // Count each word by summing its group of 1s.
      val countRDD: RDD[(String, Int)] = groupByKeyRDD
        .mapValues(iter => iter.sum)

      countRDD.foreach(println)

      /**
       * groupBy: groups by an arbitrary, explicitly specified field.
       * Unlike groupByKey, each group retains the complete records.
       */
      val groupByRDD: RDD[(String, Iterable[(String, Int)])] = kvRDD
        .groupBy { case (word, _) => word }

      // Each group holds one (word, 1) tuple per occurrence,
      // so the group size equals the word count.
      val countRDD2: RDD[(String, Int)] = groupByRDD.map {
        case (word, iter) =>
          val count: Int = iter.size
          (word, count)
      }
      countRDD2.foreach(println)
    } finally {
      // Release the SparkContext; the original demo leaked it.
      sc.stop()
    }
  }
}
