package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates the three join flavours on pair RDDs:
  * inner join, left outer join, and full outer join.
  *
  * Results are printed to stdout; run with a local master.
  */
object Demo14Join {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()

    // Was "union" — a copy-paste leftover from another demo; this app joins.
    conf.setAppName("join")

    conf.setMaster("local")

    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always released, even if a job fails.
    try {
      // Left side: (id, name). Note "000" has no matching age.
      val idNameRDD: RDD[(String, String)] = sc.parallelize(List(
        ("000", "晓伟"),
        ("001", "张三"),
        ("002", "李四"),
        ("003", "王五"))
      )
      // Right side: (id, age). Note "004" has no matching name.
      val idAgeRDD: RDD[(String, Int)] = sc.parallelize(List(
        ("001", 23),
        ("002", 24),
        ("003", 25),
        ("004", 23))
      )

      /**
        * join (inner join): a key appears in the result only when it is
        * present in BOTH RDDs.
        */
      val innerJoinRDD: RDD[(String, (String, Int))] = idNameRDD.join(idAgeRDD)

      // Flatten (id, (name, age)) into a readable triple.
      innerJoinRDD
        .map {
          case (id: String, (name: String, age: Int)) =>
            (id, name, age)
        }
        .foreach(println)

      /**
        * leftOuterJoin: every key of the LEFT RDD is kept; when the right
        * RDD lacks the key, the right value is None.
        *
        * Option: an optional value — either Some(value) or None.
        */
      val leftJoinRDD: RDD[(String, (String, Option[Int]))] = idNameRDD.leftOuterJoin(idAgeRDD)

      // Flatten, substituting a default age where the key did not match.
      leftJoinRDD
        .map {

          // Key matched on both sides.
          case (id: String, (name: String, Some(age))) =>
            (id, name, age)

          // Key missing on the right side.
          case (id: String, (name: String, None)) =>
            // Fall back to a default age of 0.
            (id, name, 0)

        }
        .foreach(println)

      /**
        * fullOuterJoin: a key appears in the result when it is present in
        * EITHER RDD; the missing side is filled with None.
        */
      val fullJoinRDD: RDD[(String, (Option[String], Option[Int]))] = idNameRDD.fullOuterJoin(idAgeRDD)

      // fullOuterJoin never emits (None, None), so the three cases below
      // cover every value it can produce (the compiler cannot see this,
      // hence the unchecked-match warning is expected here).
      fullJoinRDD
        .map {
          case (id: String, (Some(name), None)) =>
            (id, name, 0)

          case (id: String, (None, Some(age))) =>
            (id, "默认值", age)

          case (id: String, (Some(name), Some(age))) =>
            (id, name, age)
        }
        .foreach(println)
    } finally {
      // Release the driver's resources; without this the context leaks.
      sc.stop()
    }
  }

}
