package com.shujia.spark.core

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demonstrates three ways to join two key-value RDDs:
 *   1. a regular shuffle-based `join`,
 *   2. a map-side join using a driver-local Map captured in a closure,
 *   3. a map-side join using an explicit broadcast variable (preferred:
 *      the small table is shipped to each executor once, not per task).
 *
 * Map-side joins avoid a shuffle entirely and suit "large table joins
 * small table" scenarios, where the small side fits in driver/executor memory.
 */
object Demo24MapJoin {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Demo24MapJoin")

    val sc: SparkContext = new SparkContext(conf)

    val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")
    val scoRDD: RDD[String] = sc.textFile("spark/data/stu/score.txt")

    // Key both datasets by the first comma-separated field (the student id).
    val stuKVRDD: RDD[(String, String)] = stuRDD.map(line => (line.split(",")(0), line))
    val scoKVRDD: RDD[(String, String)] = scoRDD.map(line => (line.split(",")(0), line))

    // 1) Regular join: incurs a full shuffle of both sides.
    stuKVRDD.join(scoKVRDD).foreach(println)

    // 2) Map join via closure: collect the small side to the driver as a local Map.
    //    NOTE: collectAsMap() keeps only one value per key and must fit in driver memory.
    val stuLocalMap: collection.Map[String, String] = stuKVRDD.collectAsMap()

    scoKVRDD
      // No shuffle: each record looks up its match in the local Map.
      // Suitable for large-table-joins-small-table; the small side can be broadcast.
      .map(t2 => {
        val stuLine: String = stuLocalMap.getOrElse(t2._1, "")
        (t2._1, (stuLine, t2._2))
      }).foreach(println)

    // 3) If a driver-local collection is used inside an operator, broadcast it:
    //    the Map is then serialized once per executor instead of once per task.
    val stuLocalMapBro: Broadcast[collection.Map[String, String]] = sc.broadcast(stuLocalMap)
    scoKVRDD
      // No shuffle: MapJoin via the broadcast variable's value.
      .map(t2 => {
        val stuLine: String = stuLocalMapBro.value.getOrElse(t2._1, "")
        (t2._1, (stuLine, t2._2))
      }).foreach(println)

    // Keep the JVM alive so the Spark Web UI (http://localhost:4040) stays
    // inspectable. Sleep instead of busy-spinning, which would pin a CPU core.
    while (true) {
      Thread.sleep(10000)
    }

  }

}
