package com.spark.core.transformation

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * groupByKey demo.
 * Groups all values that share the same key into a single collection:
 * (K, V) => (K, Iterable[V])
 */
object Demo22_groupByKey {

  /**
   * Entry point: builds a small (name, score) pair RDD and groups the scores by name.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Local single-threaded master — this is a standalone demo.
    val conf = new SparkConf().setMaster("local").setAppName("groupByKey")
    val sc = new SparkContext(conf)
    sc.setLogLevel("error") // quiet the console; Spark upper-cases the level internally

    try {
      // (name, score) pairs; duplicate keys on purpose so grouping is visible.
      val rdd = sc.parallelize(List[(String, Double)](("zhangsan", 66.5),
        ("lisi", 33.2), ("zhangsan", 66.7), ("lisi", 33.4),
        ("zhangsan", 66.8), ("wangwu", 29.8)))

      // groupByKey takes no arguments: the keys stay as-is and every value
      // belonging to the same key is collected into one Iterable.
      val result: RDD[(String, Iterable[Double])] = rdd.groupByKey()
      result.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job above throws;
      // the original demo leaked it by never calling stop().
      sc.stop()
    }
  }

}
