package com.shujia.spark.core

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo24Bro {

  /**
    * Demo: broadcast variables and map-side join (mapjoin).
    *
    * Reads a small student table and a large score table, broadcasts the
    * student data as a Map to every executor, and joins score records to
    * student info inside a `map` — no shuffle is produced.
    *
    * mapjoin: the join happens on the map side, so no shuffle occurs.
    * Use it when joining a large table with a small table (the small one
    * must fit in driver/executor memory, since it is collected and broadcast).
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()

    conf.setMaster("local")

    conf.setAppName("bro")

    val sc = new SparkContext(conf)

    val studentRDD: RDD[String] = sc.textFile("data/students.txt")

    // 100 partitions: simulates the "large table" side of the join
    val scoreRDD: RDD[String] = sc.textFile("data/score.txt", 100)

    // An RDD cannot be used inside another RDD's transformation/action —
    // the code below would fail at runtime with a nested-RDD error:
    /*    scoreRDD.foreach(sco => {
          studentRDD.foreach(println)
        })*/

    // Pull the (small) student RDD's data back to the driver as an Array
    val studentArray: Array[String] = studentRDD.collect()
    // Convert to key-value pairs keyed by student id
    val kvStuArray: Array[(String, String)] = studentArray.map(stu => {
      val split: Array[String] = stu.split(",")
      val id: String = split(0)
      (id, stu)
    })
    // Convert to a Map for O(1) lookup by id during the join
    val stuMap: Map[String, String] = kvStuArray.toMap

    // Broadcast the plain (non-RDD) variable to all executors
    val stuMapBro: Broadcast[Map[String, String]] = sc.broadcast(stuMap)

    val joinRDD: RDD[(String, String)] = scoreRDD.map(sco => {
      val split: Array[String] = sco.split(",")
      val id: String = split(0)
      // Look up the student's info by id in the broadcast Map.
      // Read the broadcast value inside the operator (on the executor).
      val broValue: Map[String, String] = stuMapBro.value
      val stuInfo: String = broValue.getOrElse(id, "默认值")
      (stuInfo, sco)
    })

    joinRDD.foreach(println)

    // Keep the driver alive so the Spark web UI stays reachable.
    // Sleep instead of `while (true) {}` — a busy loop pins a CPU core.
    Thread.sleep(Long.MaxValue)

  }

}
