package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demo of joining two RDDs keyed by student id.
  *
  * Reads students and scores from local text files, keys both datasets
  * by the first CSV column (the student id), then demonstrates:
  *   - `join`          : inner join — only keys present in both RDDs survive
  *   - `leftOuterJoin` : keeps every left-side record; the right side is an
  *                       `Option[String]` (`None` when no score matched)
  */
object Demo11Join {

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setAppName("make")
      .setMaster("local")

    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always released, even if the job fails.
    try {
      val studentRDD: RDD[String] = sc.textFile("spark/data/students.txt")
      val scoreRDD: RDD[String] = sc.textFile("spark/data/score.txt")

      // Convert each RDD to key-value form.
      // We join on the student id, so the id (first CSV field) becomes the key.
      val studentKVRDD: RDD[(String, String)] = studentRDD.map(line => {
        val id: String = line.split(",")(0)
        (id, line)
      })

      val scoreKVRDD: RDD[(String, String)] = scoreRDD.map(line => {
        val id: String = line.split(",")(0)
        (id, line)
      })

      /**
        * join: joins two key-value RDDs; the default semantics are an inner join,
        * so only ids present in BOTH datasets appear in the result.
        */
      val joinRDD: RDD[(String, (String, String))] = studentKVRDD.join(scoreKVRDD)

      // Flatten the joined pair into a single display string.
      val resultRDD: RDD[String] = joinRDD.map {
        case (id, (stuInfo, scoreInfo)) =>
          s"$stuInfo|$scoreInfo"
      }

      resultRDD.foreach(println)

      /**
        * leftOuterJoin: keeps every record of the left (student) side; when the
        * right (score) side has no matching key, the value is None.
        *
        * Option has two states: Some(value) when present, None when absent.
        */
      val leftJoinRDD: RDD[(String, (String, Option[String]))] = studentKVRDD.leftOuterJoin(scoreKVRDD)

      val leftRDD: RDD[String] = leftJoinRDD.map {
        // Matched: both student info and score info are available.
        case (id, (stuInfo, Some(scoreInfo))) =>
          s"$stuInfo|$scoreInfo"

        // Unmatched: no score for this student — substitute a default marker.
        case (id, (stuInfo, None)) =>
          s"$stuInfo|默认值"
      }

      leftRDD.foreach(println)
    } finally {
      // Release cluster resources / local threads held by the context.
      sc.stop()
    }
  }

}
