package com.niit.spark.rdd.test

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Date: 2025/4/28
 * Author: Ys
 * Description: Demonstrates grouping a pair RDD with groupByKey and
 * summing each key's values with mapValues.
 */
object groupByKey {

  def main(args: Array[String]): Unit = {
    // Local-mode Spark driver for the groupByKey exercise.
    val conf = new SparkConf().setMaster("local[*]").setAppName("GroupByKeyExercise")
    val sc   = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    // (product, sale amount) pairs; ProductA appears twice on purpose.
    val sales = sc.parallelize(Seq(
      ("ProductA", 100.5),
      ("ProductB", 200.8),
      ("ProductA", 150.2),
      ("ProductC", 300.0)
    ))

    // Group all amounts under their product key:
    // ProductA -> [100.5, 150.2], ProductB -> [200.8], ProductC -> [300.0]
    val grouped: RDD[(String, Iterable[Double])] = sales.groupByKey()
    grouped.collect().foreach(println)

    // mapValues transforms each key's value collection without touching the key,
    // so summing the iterable yields the total sales per product.
    val totals: RDD[(String, Double)] = grouped.mapValues(_.sum)
    totals.collect().foreach(println)

    sc.stop()
  }

}
