package com.example;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.Function3;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.State;
import org.apache.spark.streaming.StateSpec;
import org.apache.spark.streaming.api.java.*;
import scala.Tuple2;

import java.util.Arrays;

/**
 * bin/spark-submit --master local[*] \
 * --class com.example.JavaSparkStreaming \
 * --name wordCount /opt/spark245hadoop27/bin/spark-0.0.1-SNAPSHOT.jar
 *
 * @author elinshaw
 */
public class JavaSparkStreaming {

    /**
     * Streaming word count with a running (global) per-word total, kept via
     * {@code mapWithState}. Reads lines from a TCP socket, splits them on single
     * spaces, counts words per 5-second micro-batch, then folds each batch count
     * into the state held for that word and prints the running totals.
     *
     * @param args optional overrides: {@code args[0]} = source host
     *             (default {@code 192.168.101.58}), {@code args[1]} = source port
     *             (default {@code 9999}). Omitting them preserves the original
     *             hard-coded behavior.
     */
    public static void main(String[] args) {
        // Defaults are identical to the previously hard-coded values, so existing
        // spark-submit invocations without arguments behave unchanged.
        final String host = args.length > 0 ? args[0] : "192.168.101.58";
        final int port = args.length > 1 ? Integer.parseInt(args[1]) : 9999;

        SparkConf conf = new SparkConf().setMaster("local[3]").setAppName("JavaSparkStreaming");
        // 5-second micro-batch interval.
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(5));
        // mapWithState requires checkpointing to persist state snapshots between batches.
        ssc.checkpoint("file:///home/checkpointData");

        JavaReceiverInputDStream<String> lines = ssc.socketTextStream(host, port);

        // Tokenize each line on single spaces and count occurrences within the batch.
        JavaDStream<String> words = lines.flatMap(
                (FlatMapFunction<String, String>) line -> Arrays.asList(line.split(" ")).iterator());
        JavaPairDStream<String, Integer> batchCounts = words
                .mapToPair((PairFunction<String, String, Integer>) word -> new Tuple2<>(word, 1))
                .reduceByKey((Function2<Integer, Integer, Integer>) Integer::sum);

        // Fold this batch's count into the running total stored in State.
        // 'one' is the batch value for the key (absent when the key is only timing
        // out); 'state' carries the total accumulated across all previous batches.
        Function3<String, Optional<Integer>, State<Integer>, Tuple2<String, Integer>> mappingFunction =
                (word, one, state) -> {
                    int sum = one.orElse(0) + (state.exists() ? state.get() : 0);
                    Tuple2<String, Integer> output = new Tuple2<>(word, sum);
                    state.update(sum);
                    return output;
                };

        JavaMapWithStateDStream<String, Integer, Integer, Tuple2<String, Integer>> runningCounts =
                batchCounts.mapWithState(StateSpec.function(mappingFunction));
        runningCounts.print();

        ssc.start();
        try {
            ssc.awaitTermination();
        } catch (Exception e) {
            if (e instanceof InterruptedException) {
                // Restore the interrupt flag so callers/JVM shutdown can observe it.
                Thread.currentThread().interrupt();
            }
            e.printStackTrace();
        } finally {
            ssc.close();
        }
    }
}
