package com.shujia.spark.sql

import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object Demo10RDDMapJOin {

  /**
    * Demonstrates replacing a shuffle-based reduce-side join with a
    * broadcast (map-side) join over two CSV text files keyed by student id.
    *
    * The application intentionally never exits so the Spark web UI can be
    * inspected after the job finishes.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("burk")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    // Each line is CSV; the first field is the student id used as the join key.
    val studentRDD: RDD[String] = sc.textFile("data/students.txt")
    val scoreRDD: RDD[String] = sc.textFile("data/score.txt")

    val stuKVRDD: RDD[(String, String)] = studentRDD.map(line => (line.split(",")(0), line))

    val scoKVRDD: RDD[(String, String)] = scoreRDD.map(line => (line.split(",")(0), line))

    // Reduce-side join: triggers a shuffle. Kept here for comparison with the
    // map-side join below.
    val joinRDD: RDD[(String, (String, String))] = stuKVRDD.join(scoKVRDD)

    //joinRDD.foreach(println)


    /**
      * Broadcast the small table to implement a map-side join:
      * 1. Collect the small table to the driver (it must fit in driver memory).
      * 2. Broadcast it so each executor receives one read-only copy.
      * 3. map over the big table and look up the student info by student id.
      */

    val stuList: Array[(String, String)] = stuKVRDD.collect()

    val stuMap: Map[String, String] = stuList.toMap


    // Broadcast the lookup map once per executor instead of serializing it
    // into every task closure.
    val stuBro: Broadcast[Map[String, String]] = sc.broadcast(stuMap)


    // Map-side join: no shuffle. Score rows whose student id has no match in
    // the broadcast map are paired with an empty string.
    val joinRDD2: RDD[(String, String)] = scoKVRDD.map {
      case (sId: String, line: String) =>
        // Use the student id to look up the student info in the broadcast table.
        val stuInfo: String = stuBro.value.getOrElse(sId, "")
        (line, stuInfo)
    }


    joinRDD2.foreach(println)


    // Keep the driver alive so the Spark UI stays reachable. The original
    // empty `while (true) {}` busy-spun and pegged a CPU core; sleeping
    // preserves the intent without burning CPU.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
