package com.shujia.core

import com.shujia.core.Demo07GroupBy.StuGrp
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Map-side join (broadcast join) demo.
 *
 * A map join suits "large table joins small table" scenarios: the small
 * table is collected to the driver and broadcast to every map task, so the
 * join happens entirely in the map stage — no reduce stage, no shuffle,
 * hence better performance than a regular (shuffle) join.
 *
 * Here the student data acts as the small table and is broadcast; the
 * score data is streamed through as the large table.
 */
object Demo24MapJoin {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    conf.setAppName("Demo24MapJoin")
    conf.setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    val stuLineRDD: RDD[String] = sc.textFile("spark/data/students.txt")

    // Parse each CSV line into a StuGrp case class instance.
    // NOTE(review): assumes every line has >= 5 comma-separated fields and
    // field 2 is a valid Int — malformed input will throw; confirm data is clean.
    val stuRDD: RDD[StuGrp] = stuLineRDD.map(line => {
      val splits: Array[String] = line.split(",")
      StuGrp(splits(0), splits(1), splits(2).toInt, splits(3), splits(4))
    })

    // Load the score data keyed by student id; the value is the whole line
    // with commas replaced by '|' (so the id appears in the value too).
    val scoreLineRDD: RDD[(String, String)] = sc.textFile("spark/data/score.txt").map(line => (line.split(",")(0), line.replace(",", "|")))

    // Join students and scores on id -> [id, name, clazz, subject, score].

    // Collect the small (student) table to the driver as a lookup map:
    // id -> "name|clazz". This must fit in driver/executor memory.
    val stuMap: Map[String, String] = stuRDD.map(stu => (stu.id, stu.name + "|" + stu.clazz)).collect().toMap

    // Broadcast the lookup map so every task gets one read-only copy
    // instead of shipping it with each closure.
    val stuMapBro: Broadcast[Map[String, String]] = sc.broadcast(stuMap)

    // Process score records one by one; resolve name/clazz from the
    // broadcast map by id. Unmatched ids fall back to "" (left-join-like).
    scoreLineRDD.map(t2 => {
      val id: String = t2._1
      val subjectAndScore: String = t2._2
      val nameAndClazz: String = stuMapBro.value.getOrElse(id, "")
      (id, nameAndClazz, subjectAndScore)
    }).foreach(println)

    // Keep the application (and its Spark web UI) alive for inspection.
    // Sleep instead of busy-waiting so we don't pin a CPU core at 100%.
    while (true) {
      Thread.sleep(1000)
    }

  }

}
