package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo17Join {

  /**
   * Demonstrates the four join types available on key-value RDDs in
   * Spark Core: inner join, left outer join, right outer join, and
   * full outer join.
   *
   * Runs locally (`local` master) against two small in-memory datasets:
   * student records keyed by student id, and score records keyed by the
   * same student id.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName(this.getClass.getSimpleName.replace("$", ""))
    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always stopped, even if the job throws,
    // so local resources (UI port, threads) are released.
    try {
      // Student records: id,name,age,gender,class
      val stuRDD: RDD[String] = sc.parallelize(List(
        "1500100001,施笑槐,22,女,文科六班"
        , "1500100003,单乐蕊,22,女,理科六班"
        , "1500100004,葛德曜,24,男,理科三班"
      ))

      // Score records: studentId,subjectId,score
      val scoreRDD: RDD[String] = sc.parallelize(List(
        "1500100001,1000001,98"
        , "1500100001,1000002,5"
        , "1500100001,1000003,137"
        , "1500100001,1000004,29"
        , "1500100001,1000006,52"
        , "1500100002,1000001,139"
        , "1500100002,1000002,102"
        , "1500100002,1000003,44"
        , "1500100002,1000004,18"
        , "1500100002,1000005,46"
        , "1500100002,1000006,91"
        , "1500100003,1000001,48"
        , "1500100003,1000002,132"
        , "1500100003,1000003,41"
        , "1500100003,1000007,32"
        , "1500100003,1000008,7"
        , "1500100003,1000009,99"
      ))

      /*
      Join types (SQL terminology):
        Inner join:  inner join / join
        Outer joins: outer join
          Left outer:  left join / left outer join
          Right outer: right join / right outer join
          Full outer:  full join / full outer join
          The "outer" keyword can be omitted.
       */

      // In Spark Core, only key-value (pair) RDDs can be joined,
      // so key each line by its first CSV field (the student id).
      val stuKVRDD: RDD[(String, String)] = stuRDD.map(line => (line.split(",")(0), line))
      val scoreKVRDD: RDD[(String, String)] = scoreRDD.map(line => (line.split(",")(0), line))

      stuKVRDD
        .join(scoreKVRDD) // inner join: only keys present on BOTH sides are kept
      //      .foreach(println) // 11 records (001 matches 5 scores, 003 matches 6)

      stuKVRDD
        .leftOuterJoin(scoreKVRDD) // keeps every left-side key; right value becomes Option
      //      .foreach(println) // 12 records (11 matches + unmatched student 004)

      stuKVRDD
        .rightOuterJoin(scoreKVRDD) // keeps every right-side key; left value becomes Option
      //      .foreach(println) // 17 records (11 matches + 6 unmatched scores for 002)

      stuKVRDD
        .fullOuterJoin(scoreKVRDD) // keeps keys from BOTH sides; both values become Option
        .foreach(println) // 18 records (11 matches + 1 unmatched student + 6 unmatched scores)
    } finally {
      // Was missing in the original: release the context's resources.
      sc.stop()
    }
  }

}
