package com.shujia.core

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.util.LongAccumulator
import org.apache.spark.{SparkConf, SparkContext}

object Demo21Broadcast {

  /**
   * Demo: filtering an RDD against a small driver-side lookup collection
   * using a broadcast variable.
   *
   * Without broadcasting, the `ids` collection would be serialized into every
   * task closure and shipped once per task; broadcasting ships it once per
   * executor. In Spark SQL, the map-side (broadcast) join is built on top of
   * this mechanism.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()

    conf.setMaster("local")
    conf.setAppName("app")

    val sc = new SparkContext(conf)

    try {
      // 100 is the *minimum* split hint passed to textFile; the actual
      // partition count is printed below for inspection.
      val studentsRDD: RDD[String] = sc.textFile("spark/data/students.csv", 100)
      println(studentsRDD.getNumPartitions)

      // Variant WITHOUT a broadcast variable (kept for comparison):
      // the `ids` list is captured by the closure and re-sent with each task.
      //    val ids: List[String] = List("1500100005", "1500100009",
      //      "1500100014", "1500100020", "1500100024")
      //
      //    val filterRDD: RDD[String] = studentsRDD
      //      .filter(line => {
      //        val split: Array[String] = line.split(",")
      //        val id: String = split(0)
      //        ids.contains(id)
      //      })
      //
      //    filterRDD.foreach(println)

      val ids: List[String] = List("1500100005", "1500100009",
        "1500100014", "1500100020", "1500100024")

      // Broadcast the lookup data to all executors. A Set gives O(1)
      // membership checks instead of List's O(n) scan per input line.
      val broadIds: Broadcast[Set[String]] = sc.broadcast(ids.toSet)

      val filterRDD: RDD[String] = studentsRDD
        .filter(line => {
          val split: Array[String] = line.split(",")
          val id: String = split(0)
          // Read the broadcast value inside the task (read-only on executors).
          val idSet: Set[String] = broadIds.value
          idSet.contains(id)
        })

      filterRDD.foreach(println)

      // Release executor-side copies of the broadcast once it is no longer needed.
      broadIds.destroy()
    } finally {
      // Always stop the context so driver resources (UI, scheduler, local
      // executor threads) are released and the JVM can exit cleanly.
      sc.stop()
    }
  }
}
