package core.rdd.广播变量;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.broadcast.Broadcast;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;


/**
 * Demonstrates Spark broadcast variables.
 *
 * <p>By default Spark ships closure data once per TASK (roughly one per CPU core), so an
 * executor running 6 tasks would receive 6 copies of the same data — a real performance
 * problem for large lookup structures. Wrapping the data in a broadcast variable makes
 * Spark ship it once per EXECUTOR instead:
 *
 * <pre>{@code
 * Broadcast<List<String>> broadcastList = sc.broadcast(Arrays.asList("A", "C", ...));
 * // inside a task:
 * broadcastList.value().contains(v1)
 * }</pre>
 *
 * <p>A broadcast variable is sent to each node only once and must be treated as read-only.
 */
public class Spark02_broadcast {
    public static void main(String[] args) {
        // SparkConf configures the application name and the master URL.
        SparkConf conf = new SparkConf()
                .setAppName("Spark02_broadcast") // application name (matches this class)
                .setMaster("local[*]"); // replace with your cluster master URL if needed

        // JavaSparkContext is the main entry point for interacting with the cluster.
        // It is Closeable, so try-with-resources guarantees shutdown even on failure.
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            // Broadcast the lookup list: shipped once per executor, read-only in tasks.
            Broadcast<List<String>> broadcastList = sc.broadcast(Arrays.asList(
                    "A", "C", "D", "E", "F", "G", "H", "9", "10"
            ));

            // Driver-side data to filter against the broadcast lookup list.
            List<String> asList2 = Arrays.asList(
                    "A", "2", "C"
            );

            JavaRDD<String> parallelize = sc.parallelize(asList2);

            // Keep only elements present in the broadcast list.
            // Lambda replaces the anonymous Function class; "== true" was redundant.
            JavaRDD<String> filterRdd = parallelize.filter(
                    v1 -> broadcastList.value().contains(v1));

            // Expected output: A and C ("2" is not in the broadcast list).
            filterRdd.collect().forEach(System.out::println);
        }
    }
}
