package com.bigdata.core.example

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.util.LongAccumulator
import org.apache.spark.{SparkConf, SparkContext}

object BroadCastTest {

  /**
   * Demonstrates Spark broadcast variables and long accumulators.
   *
   * A broadcast variable ships the read-only lookup list to each executor
   * once, instead of serializing a fresh copy of it into every task closure.
   * The accumulator counts, on the driver, how many records the filter
   * examined across all partitions.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("broadCast")
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    try {
      val list = List[String]("zhangsan", "tianqi")

      // Broadcast: one copy per executor instead of one per task.
      val broadList: Broadcast[List[String]] = sc.broadcast(list)

      // Driver-side counter, safely incremented from task closures.
      val acc: LongAccumulator = sc.longAccumulator

      val nameRDD: RDD[String] = sc.parallelize(List[String]("zhangsan", "lisi",
        "wangwu", "zhaoliu", "tianqi"), 2)

      val filterRDD: RDD[String] = nameRDD.filter { name =>
        acc.add(1)
        // Read through the broadcast handle. Capturing `list` directly would
        // serialize a copy of it into every task closure, defeating the demo.
        broadList.value.contains(name)
      }

      // RDDs are lazy: an action must run before the filter (and the
      // accumulator updates inside it) actually executes.
      filterRDD.foreach(println)

      // Read accumulators only on the driver, after the job has finished.
      println(s"records examined: ${acc.value}")
    } finally {
      // Always release the SparkContext, even if the job throws.
      sc.stop()
    }
  }
}