package org.huangrui.spark.scala.core.rdd.operate.transform

import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author hr
 * @Create 2024-10-17 17:14 
 */
object Spark11_Operate_Transform_KV_groupByKey {

  /**
   * Demonstrates the difference between `groupBy` and `groupByKey`
   * on an RDD of key-value pairs.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("spark")
    val sparkContext = new SparkContext(sparkConf)

    val pairs = List(("a", 1), ("b", 2), ("a", 3), ("b", 4))
    val pairRdd = sparkContext.parallelize(pairs, 2)

    // groupBy extracts a key from each element and groups the WHOLE elements,
    // so every group still contains the complete tuples:
    //   (a, [ (a, 1), (a, 3) ])
    //   (b, [ (b, 2), (b, 4) ])
    // Note: groupBy is implemented on top of groupByKey internally.
    pairRdd.groupBy(_._1).collect().foreach(println)

    // groupByKey treats the data as KV pairs and groups only the VALUES by key,
    // dropping the key from each grouped element:
    //   (a, [1, 3])
    //   (b, [2, 4])
    pairRdd.groupByKey().collect().foreach(println)

    sparkContext.stop()
  }
}
