package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo14Join {

  /**
   * Demo of `join` / `leftOuterJoin`: transformation operators that relate
   * two RDDs by key. Both RDDs must be in key-value (KV) form and the key
   * types must match.
   */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Demo14Join")

    val sc: SparkContext = new SparkContext(conf)

    // Raw input lines. Expected formats (assumed from the parsing below — confirm against data):
    //   students.txt: id,name,age,...
    //   score.txt:    id,subject,score
    val stuRDD: RDD[String] = sc
      .textFile("spark/data/stu/students.txt")

    val scoreRDD: RDD[String] = sc
      .textFile("spark/data/stu/score.txt")

    // Total score per student: (id, sumScore)
    val sumScoreRDD: RDD[(String, Int)] = scoreRDD
      .map(line => {
        val splits: Array[String] = line.split(",")
        (splits(0), splits(2).toInt)
      })
      .reduceByKey(_ + _)

    // Student data keyed by id: (id, (name, age)).
    // Built once and cached, because it feeds both joins below — the original
    // rebuilt this lineage twice, re-reading and re-parsing the file each time.
    val stuKVRDD: RDD[(String, (String, Int))] = stuRDD
      .map(line => {
        val splits: Array[String] = line.split(",")
        (splits(0), (splits(1), splits(2).toInt))
      })
      .cache()

    // Inner join: only students that have at least one score record.
    stuKVRDD
      .join(sumScoreRDD)
      .map {
        case (id, ((name, age), sumScore)) =>
          s"$id,$name,$age,$sumScore"
      }
      .foreach(println)

    // Left outer join: keep every student; a student with no score records
    // gets None on the right side, which we default to a total of 0.
    stuKVRDD
      .leftOuterJoin(sumScoreRDD)
      .map {
        case (id, ((name, age), maybeSumScore)) =>
          s"$id,$name,$age,${maybeSumScore.getOrElse(0)}"
      }
      .foreach(println)

    // Release the driver's resources — the original leaked the SparkContext.
    sc.stop()
  }

}
