package demo.spark.local;

import java.util.Arrays;
import java.util.Iterator;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

import scala.Tuple2;

/**
 * Spark Streaming word count over files appearing in an HDFS directory.
 *
 * Submit the job with a shell command such as:
 *
 * <pre>
 * ~/bigdatasoftware/spark-2.1.3-bin-hadoop2.7/bin/spark-submit \
 *     --class demo.spark.local.HDFSWordCount \
 *     --driver-java-options "-Dspark.testing.memory=471859200" \
 *     --num-executors 3 \
 *     --driver-memory 100m \
 *     --executor-memory 512m \
 *     --executor-cores 3 \
 *     ~/bigdatasoftware/spark-2.1.3-bin-hadoop2.7/study/SparkStudy-1.0-SNAPSHOT.jar
 * </pre>
 *
 * Once the job is running, put a text file into HDFS so the stream picks it up:
 *
 * <pre>
 * hadoop fs -put ./wc.txt /datas
 * </pre>
 */
public class HDFSWordCount {
    public static void main(String[] args) throws InterruptedException {
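        // NOTE: setMaster("local[2]") hard-codes the master for local testing and
        // takes precedence over any --master flag passed to spark-submit; drop it
        // (or make it conditional) when actually submitting to a cluster.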
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("HDFSWordCount");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

        // Create an input DStream that monitors an HDFS directory for new files.
        JavaDStream<String> lines = jssc.textFileStream("hdfs://hadoop-001:9000/datas");
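        // Note: textFileStream only processes files created in (or atomically moved
        // into) the directory after the stream starts, which is why wc.txt is put
        // into /datas only once the job is running.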

        // Word count: first split each line into individual words.
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
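        // Note: line.split(" ") produces empty tokens for consecutive spaces;
        // line.split("\\s+") would be a more robust whitespace split.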

        // Map each word to a (word, 1) pair.
        JavaPairDStream<String, Integer> pairs = words.mapToPair(
                new PairFunction<String, String, Integer>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<String, Integer> call(String word) throws Exception {
                        return new Tuple2<String, Integer>(word, 1);
                    }
                });

        // Sum the counts for each word. Note that reduceByKey aggregates within a
        // single 5-second batch; counts are not carried over across batches.
        JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
                new Function2<Integer, Integer, Integer>() {

                    private static final long serialVersionUID = 1L;

                    @Override
                    public Integer call(Integer v1, Integer v2) throws Exception {
                        return v1 + v2;
                    }

                });
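
        // The three anonymous classes above can be written more compactly with
        // Java 8 lambdas, which Spark 2.x accepts for these functional interfaces
        // (a sketch; the behavior is identical):
        //
        //   JavaPairDStream<String, Integer> wordCounts = lines
        //           .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
        //           .mapToPair(word -> new Tuple2<>(word, 1))
        //           .reduceByKey((a, b) -> a + b);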

        // Print the first ten elements of each batch to the driver's stdout.
        wordCounts.print();

        jssc.start();            // start the streaming computation
        jssc.awaitTermination(); // block until the context is stopped or fails
        jssc.close();
    }
}
