package cn.doitedu.day03

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates that `fullOuterJoin` on a pair RDD is equivalent to
 * `cogroup` followed by a `flatMapValues` that pads the missing side
 * with `None` and takes the cartesian product when both sides match.
 */
object T11_FullOutJoinDemo {

  def main(args: Array[String]): Unit = {

    // 1. Create the SparkConf and SparkContext (local mode, 4 threads).
    val conf = new SparkConf().setAppName("FullOuterJoinDemo")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    // Create one pair RDD by parallelizing a local collection (3 partitions).
    val rdd1: RDD[(String, Int)] = sc.parallelize(List(("tom", 1), ("tom", 2), ("jerry", 3), ("kitty", 2), ("jerry", 4)), 3)

    // Create a second pair RDD (2 partitions); Int literals widen to Double.
    val rdd2: RDD[(String, Double)] = sc.parallelize(List(("jerry", 2), ("tom", 1.1), ("shuke", 2), ("jerry", 4.4)), 2)

    // Reference result: Spark's built-in full outer join.
    val joined1: RDD[(String, (Option[Int], Option[Double]))] = rdd1.fullOuterJoin(rdd2)
    val cogrouped: RDD[(String, (Iterable[Int], Iterable[Double]))] = rdd1.cogroup(rdd2)

    // Hand-rolled full outer join on top of cogroup:
    //  - key only on the left  -> pair each left value with None
    //  - key only on the right -> pair None with each right value
    //  - key on both sides     -> cartesian product of the two value sets
    // NOTE: guard with isEmpty instead of matching `Seq()` — cogroup exposes
    // Iterable, and matching it against a Seq extractor only works by
    // accident of Spark's runtime collection type.
    val joined: RDD[(String, (Option[Int], Option[Double]))] = cogrouped.flatMapValues {
      case (it1, it2) if it2.isEmpty => it1.map(v1 => (Some(v1), None))
      case (it1, it2) if it1.isEmpty => it2.map(v2 => (None, Some(v2)))
      case (it1, it2) => for (v1 <- it1; v2 <- it2) yield (Some(v1), Some(v2))
    }

    // Print both results so the equivalence can be eyeballed.
    println("built-in fullOuterJoin:")
    joined1.foreach(println)
    println("cogroup-based full outer join:")
    joined.foreach(println)

    // Release the SparkContext's resources (was missing: context leak).
    sc.stop()
  }
}
