package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates the three RDD join flavors on pair RDDs:
 * inner join, left outer join, and full outer join.
 */
object Demo10Join {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
    conf.setMaster("local")
    // Fixed: appName was "reduceByKey", a copy-paste leftover from another demo.
    conf.setAppName("Demo10Join")

    val sc = new SparkContext(conf)

    try {
      // id -> name; note "004" has no matching age below.
      val idAndNameRDD: RDD[(String, String)] = sc.parallelize(
        List(
          ("001", "张三"),
          ("002", "李四"),
          ("003", "王五"),
          ("004", "赵六")
        ))

      // id -> age; note "005" has no matching name above.
      val idAndAgeRDD: RDD[(String, Int)] = sc.parallelize(
        List(
          ("001", 23),
          ("002", 24),
          ("003", 25),
          ("005", 26)
        ))

      /**
       * join: relate two RDDs by key.
       */

      /**
       * Inner join: a key appears in the result only if both RDDs contain it.
       */
      val innerJoinRDD: RDD[(String, (String, Int))] = idAndNameRDD.join(idAndAgeRDD)
      // Flatten the nested pair into a (id, name, age) triple.
      innerJoinRDD
        .map {
          case (id, (name, age)) => (id, name, age)
        }
        .foreach(println)


      /**
       * Left outer join: keep every key from the left RDD; the right side
       * becomes an Option.
       */
      val leftJoinRDD: RDD[(String, (String, Option[Int]))] = idAndNameRDD.leftOuterJoin(idAndAgeRDD)

      // getOrElse covers both the matched (Some) and unmatched (None) cases
      // exhaustively, defaulting the missing age to 0.
      leftJoinRDD
        .map {
          case (id, (name, ageOpt)) => (id, name, ageOpt.getOrElse(0))
        }
        .foreach(println)


      /**
       * Full outer join: keep every key from both RDDs; both sides become
       * Options.
       */
      val fullJoinRDD: RDD[(String, (Option[String], Option[Int]))] = idAndNameRDD.fullOuterJoin(idAndAgeRDD)

      // Fixed: the previous explicit Some/None match omitted the (None, None)
      // combination, making it non-exhaustive (compiler warning / potential
      // MatchError). getOrElse handles all four combinations with the same
      // defaults as before.
      fullJoinRDD
        .map {
          case (id, (nameOpt, ageOpt)) => (id, nameOpt.getOrElse("默认值"), ageOpt.getOrElse(0))
        }
        .foreach(println)

    } finally {
      // Fixed: the SparkContext was never stopped, leaking the local cluster
      // resources on exit.
      sc.stop()
    }
  }

}
