package com.shujia.core

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable
import scala.io.{BufferedSource, Source}

/**
 * 广播大变量 (Broadcasting a large variable)
 *
 * Demonstrates why a large Driver-side lookup map should be shipped to
 * executors once via a broadcast variable instead of being captured by
 * every task closure.
 */
object Demo22Broadcast {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("广播变量")

    val sc: SparkContext = new SparkContext(conf)

    // Read the student file on the Driver. The BufferedSource must be closed
    // explicitly, otherwise the file handle leaks for the lifetime of the JVM.
    val source: BufferedSource = Source.fromFile("spark/data/students.txt")
    val bs: List[String] =
      try source.getLines().toList
      finally source.close()

    // map1 lives on the Driver.
    // Without broadcasting, it is serialized into EVERY task closure, so as
    // map1 grows, each task dispatch carries the whole map and slows down.
    val map1: mutable.Map[String, String] = new mutable.HashMap[String, String]()
    for (elem <- bs) {
      val array1: Array[String] = elem.split(",")
      val id: String = array1(0)
      // Value is "name,age,gender,clazz" — fields 1..4 of the input line.
      map1.put(id, array1.slice(1, 5).mkString(","))
    }

    /**
     * Broadcast variable:
     * SparkContext ships the Driver-side map ONCE to the BlockManager of each
     * executor node, instead of once per task.
     */
    val bc: Broadcast[mutable.Map[String, String]] = sc.broadcast(map1)

    val scoreRDD: RDD[String] = sc.textFile("spark/data/score.txt")

    // Without a broadcast variable (kept for comparison):
    //    val resRDD: RDD[(String, String, String)] = scoreRDD.map((line: String) => {
    //      val array1: Array[String] = line.split(",")
    //      val id: String = array1(0)
    //      // Look up by key directly in the closure-captured map1
    //      val info: String = map1.getOrElse(id, "查无此人")
    //      val score: String = array1(2)
    //      (id, info, score)
    //    })

    // With the broadcast variable: join each score line to the student info.
    val resRDD: RDD[(String, String, String)] = scoreRDD.map((line: String) => {
      val array1: Array[String] = line.split(",")
      val id: String = array1(0)
      // bc.value resolves against the executor-local copy in the BlockManager;
      // only the lightweight Broadcast handle travels with the task closure.
      val map2: mutable.Map[String, String] = bc.value
      val info: String = map2.getOrElse(id, "查无此人")
      val score: String = array1(2)
      (id, info, score)
    })

    resRDD.foreach(println)

    // Release cluster resources and shut down the Spark UI cleanly.
    sc.stop()
  }
}
