package cn.lsh.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.LongAccumulator;

import java.util.concurrent.atomic.AtomicReference;

/**
 * Spark demos: (1) counting lines with a {@link LongAccumulator}, and
 * (2) the classic mistake of mutating driver-local state from an executor closure.
 */
public class SparkParam {

	public static void main(String[] args) {
		accumulator();
	}

	/**
	 * Counts input lines with a named LongAccumulator: executors call add(),
	 * the driver reads the merged total after the action completes.
	 */
	public static void accumulator() {
		SparkConf conf = new SparkConf();
		conf.setMaster("local").setAppName("测试广播变量");
		// try-with-resources: JavaSparkContext is Closeable — the original leaked it.
		try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
			JavaRDD<String> lines = jsc.textFile("file:/bigdata/hadoop-test/input/firend/input/friend_list.txt");
			// Create a named accumulator. Two ways:
			//   1) jsc.sc().longAccumulator(name)
			//   2) new LongAccumulator() followed by jsc.sc().register(acc, name)
			// A registered accumulator is shipped to the executors.
			LongAccumulator longAccumulator = jsc.sc().longAccumulator("counter");
			// LongAccumulator longAccumulator = new LongAccumulator();
			// jsc.sc().register(longAccumulator, "counter");
			lines.foreach(l -> {
				System.out.println(l);
				longAccumulator.add(1);
				// NOTE(review): per Spark docs, value() is only reliable on the driver;
				// reading it inside a task shows at most that task's partial view.
				// Kept here for demonstration.
				System.out.println("--------------" + longAccumulator.value());
			});
			System.out.println("***************");
			// Driver-side read: the merged count across all tasks.
			System.out.println(longAccumulator.value());
		}
	}


	/**
	 * Demonstrates WHY accumulators exist: the closure below captures a
	 * driver-side AtomicReference, but tasks operate on a serialized copy,
	 * so the driver's reference is never updated and prints null.
	 */
	public static void error() {
		/* Initialization code runs on the driver. */
		SparkConf conf = new SparkConf();
		conf.setMaster("local").setAppName("测试广播变量");
		// try-with-resources: JavaSparkContext is Closeable — the original leaked it.
		try (JavaSparkContext sc = new JavaSparkContext(conf)) {
			JavaRDD<String> lines = sc.textFile("file:/bigdata/hadoop-test/input/firend/input/friend_list.txt");
			Integer i = 0;
			AtomicReference<Integer> t = new AtomicReference<>();

			/* Operator (closure) code runs on the workers — on a deserialized
			 * copy of `t`, not the driver's instance. */
			lines.foreach(l -> {
				System.out.println(l);
				t.set(add(i));
			});
			System.out.println("***************");
			/* Prints null: the driver's `t` was never touched by the tasks. */
			System.out.println(t.get());
		}
	}


	/** Returns i + 1 (the parameter is a local copy; the caller's value is unchanged). */
	private static int add(int i) {
		return ++i;
	}
}
