package com.ada.spark.rddoperator

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates `cogroup`: when called on RDDs of type (K, V) and (K, W), it
  * returns an RDD of type (K, (Iterable&lt;V&gt;, Iterable&lt;W&gt;)), grouping all values
  * for each key from both RDDs. Keys present in only one RDD get an empty
  * Iterable on the other side.
  */
object Spark28_cogroup {

    def main(args: Array[String]): Unit = {
        // Build the Spark configuration; local[*] runs with all available cores.
        val conf = new SparkConf().setAppName("Spark28_cogroup").setMaster("local[*]")
        // Create the driver-side Spark context.
        val sc = new SparkContext(conf)

        try {
            val rdd = sc.parallelize(Array((1, "a"), (2, "b"), (3, "c")))

            val rdd1 = sc.parallelize(Array((1, 4), (2, 5), (3, 6), (4, 7)))

            // cogroup pairs each key with the full collections of values from both
            // RDDs; key 4 exists only in rdd1, so its left side is an empty buffer.
            val result = rdd.cogroup(rdd1)

            println(result.collect().mkString(","))
            //(4,(CompactBuffer(),CompactBuffer(7))),(1,(CompactBuffer(a),CompactBuffer(4))),(2,(CompactBuffer(b),CompactBuffer(5))),(3,(CompactBuffer(c),CompactBuffer(6)))
        } finally {
            // Always stop the SparkContext so executors and the UI shut down cleanly,
            // even if an action above throws.
            sc.stop()
        }
    }

}
