package cn.doitedu.day03

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/*
 * Join实现inner join
 */
/**
 * Demonstrates two equivalent ways to perform an inner join on pair RDDs:
 * the built-in `join` operator, and a manual implementation using
 * `cogroup` + `flatMapValues` (the Cartesian product of both value groups
 * per key, which only yields output for keys present in BOTH RDDs).
 */
object T10_JoinDemo {

  def main(args: Array[String]): Unit = {

    // 1. Build the SparkConf and SparkContext.
    //    Runs locally with 4 threads; app name matches this demo
    //    (was "WordCount", a copy-paste leftover).
    val conf = new SparkConf().setAppName("JoinDemo")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    try {
      // Create a pair RDD via parallelize (3 partitions).
      // Note the duplicate keys ("tom", "jerry") — the join must
      // produce one output row per matching value pair.
      val rdd1: RDD[(String, Int)] = sc.parallelize(List(("tom", 1), ("tom", 2), ("jerry", 3), ("kitty", 2), ("jerry", 4)), 3)

      // A second pair RDD with Double values (2 partitions).
      // "kitty" and "shuke" exist in only one RDD each, so an inner
      // join drops them.
      val rdd2: RDD[(String, Double)] = sc.parallelize(List(("jerry", 2), ("tom", 1.1), ("shuke", 2), ("jerry", 4.4)), 2)

      // Way 1: the built-in inner join.
      // (Previously computed but never used — print it so the demo
      // actually shows both approaches.)
      val rdd3: RDD[(String, (Int, Double))] = rdd1.join(rdd2)
      rdd3.foreach(println)

      // Way 2: implement the inner join with cogroup.
      // cogroup yields, per key, the full group of values from each side.
      val grouped: RDD[(String, (Iterable[Int], Iterable[Double]))] = rdd1.cogroup(rdd2)

      // Cross the two value groups per key; an empty group on either
      // side yields nothing, which is exactly inner-join semantics.
      val resRDD: RDD[(String, (Int, Double))] = grouped.flatMapValues { case (vs1, vs2) =>
        for (v1 <- vs1; v2 <- vs2) yield (v1, v2)
      }

      resRDD.foreach(println)
    } finally {
      // Always release the local cluster's resources, even if a job fails.
      sc.stop()
    }
  }
}
