package com.xuzz.study.spark.java;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.function.Function;
import java.util.regex.Pattern;

/**
 * Batch (non-streaming) word count over a text directory.
 *
 * spark-submit --master local[3] --executor-memory 512m --class com.xuzz.study.spark.java.WordCount1  spark-1.0-SNAPSHOT.jar
 *
 * After the job finishes, `cat wordcount/*` shows the result.
 */
public class WordCount1 {
    /** Compiled once and reused for every line; splits on a single space character. */
    private static final Pattern SPACE = Pattern.compile(" ");

    /** Defaults preserved from the original hard-coded paths. */
    private static final String DEFAULT_INPUT = "/opt/spark-2.4.4/test/data";
    private static final String DEFAULT_OUTPUT = "/opt/spark-2.4.4/test/wordcount";

    /**
     * Entry point. Optionally override paths on the command line:
     * args[0] = input path, args[1] = output path (both fall back to the defaults above).
     */
    public static void main(String[] args)
    {
        String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;
        String outputPath = args.length > 1 ? args[1] : DEFAULT_OUTPUT;

        SparkConf sparkConf = new SparkConf().setAppName("sparkBoot").setMaster("local");

        // JavaSparkContext is Closeable; try-with-resources guarantees the context
        // is stopped even if the job throws. (The original never closed it.)
        try (JavaSparkContext sparkContext = new JavaSparkContext(sparkConf)) {
            JavaRDD<String> lines = sparkContext.textFile(inputPath).cache();

            // One record per space-delimited token.
            JavaRDD<String> words = lines.flatMap(
                    (FlatMapFunction<String, String>) s -> Arrays.asList(SPACE.split(s)).iterator());

            // Map each word to (word, 1), then sum counts per word.
            JavaPairRDD<String, Integer> wordsOnes = words.mapToPair(
                    (PairFunction<String, String, Integer>) s -> new Tuple2<>(s, 1));
            JavaPairRDD<String, Integer> wordsCount = wordsOnes.reduceByKey(
                    (Function2<Integer, Integer, Integer>) Integer::sum);

            wordsCount.saveAsTextFile(outputPath);
        }
    }
}
