package SparkStreaming;
/**
 * simultaneously 同时地；同步地
 * hence 因此
 * section 部分部件
 * wildcard 通配符
 * custom sources 自定义来源
 * acknowledged 确认
 * arbitrary  任意
 * eliminated 淘汰 消除 排除
 * primitive 原始的
 * inadvertently 无意地，无心地
 */

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.*;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.*;
import org.apache.spark.streaming.*;
import org.apache.spark.streaming.api.java.*;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;


/**基础版本 wordcount
 * @program: MySpark
 * @description
 * @author: tkk fendoukaoziji
 * @create: 2019-04-23 09:02
 **/
public class SparkStreaming {

    /**
     * Entry point for a basic socket word-count Spark Streaming job.
     * <p>
     * Connects to a TCP text source at {@code node3:1234}, processes the stream
     * in 1-second micro-batches, splits each line on single spaces, and prints
     * the per-batch word counts to stdout. Counts are per batch only; a running
     * total would require {@code updateStateByKey} plus a checkpoint directory.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        // Quiet Spark's verbose INFO logging so batch output is readable.
        Logger.getLogger("org.apache.spark").setLevel(Level.WARN);
        // local[*]: use all local cores; a receiver occupies one, the rest process batches.
        SparkConf conf = new SparkConf().setAppName("repeatSparkStreaming").setMaster("local[*]");
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(1));

        // Receiver-based source: one line of text per stream element.
        JavaReceiverInputDStream<String> lines = ssc.socketTextStream("node3", 1234);
        JavaDStream<String> words = lines.flatMap(x -> Arrays.asList(x.split(" ")).iterator());
        JavaPairDStream<String, Integer> pairs = words.mapToPair(x -> new Tuple2<>(x, 1));
        // Per-batch aggregation; Integer::sum is the idiomatic merge function.
        JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(Integer::sum);
        wordCounts.print();

        ssc.start();
        try {
            // Blocks until the context is stopped or the thread is interrupted.
            ssc.awaitTermination();
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of swallowing it (Effective Java / Java concurrency convention).
            Thread.currentThread().interrupt();
        } finally {
            // Shut the streaming context (and underlying SparkContext) down cleanly.
            ssc.stop();
        }
    }
}
