package com.ada.spark.rddoperator

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * 作用：groupByKey也是对每个key进行操作，但只生成一个sequence。
  */
/**
  * Demonstrates `groupByKey`: for each key, all values are shuffled into a
  * single sequence (an `Iterable`), which is then summed to produce word counts.
  *
  * NOTE: for pure aggregation like this, `reduceByKey(_ + _)` is preferred in
  * real code — it combines values map-side before the shuffle, so far less data
  * crosses the network. `groupByKey` is kept here because it is what this
  * example exists to illustrate.
  */
object Spark20_groupByKey {

    def main(args: Array[String]): Unit = {
        // Run locally, using all available cores.
        val conf = new SparkConf().setAppName("Spark20_groupByKey").setMaster("local[*]")
        val sc = new SparkContext(conf)

        try {
            val words = Array("one", "two", "two", "three", "three", "three")

            // Pair each word with an initial count of 1: ("one",1), ("two",1), ...
            val wordPairsRDD = sc.parallelize(words).map(word => (word, 1))

            // groupByKey shuffles every value for a key into one sequence:
            // RDD[(String, Iterable[Int])]
            val group = wordPairsRDD.groupByKey()

            // Sum the grouped 1s to get the count per word.
            val sumRDD: RDD[(String, Int)] = group.map(t => (t._1, t._2.sum))

            println(sumRDD.collect().mkString(","))
            // e.g. (two,2),(one,1),(three,3) — key order after collect() is not guaranteed
        } finally {
            // Fix: the original never stopped the SparkContext, leaking its
            // resources (threads, UI server, temp dirs). Always release it.
            sc.stop()
        }
    }

}
