package spark.rdd;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;
import spark.stream.kafka.SparkUtils;

import java.util.Arrays;

/**
 * Word-occurrence count calculation over a text file.
 *
 * @author dlm
 */
public class WordCountCalcLearing {
    // Loggers are effectively constants: declare them static final.
    private static final Logger logger = LoggerFactory.getLogger(WordCountCalcLearing.class);

    public static void main(String[] args) {
        JavaSparkContext jsc = SparkUtils.getJavaSparkContext("WordCountSpark", "local[2]", "WARN");
        JavaRDD<String> wordRdd = SparkUtils.createRddExternal(jsc, "D:/README.txt");
        wordCountCal(wordRdd);
        jsc.stop();
    }

    /**
     * Counts occurrences of each word in the given RDD of text lines and prints the
     * key-sorted (word, count) pairs twice: once via the SLF4J logger and once to stdout.
     *
     * @param wordRdd RDD whose elements are raw text lines to be tokenized and counted
     */
    public static void wordCountCal(JavaRDD<String> wordRdd) {
        // Split each line into words on any run of characters that is not a letter
        // or an apostrophe (keeps contractions like "don't" intact).
        JavaRDD<String> words =
                wordRdd.flatMap(line -> Arrays.asList(line.split("[^a-zA-Z']+")).iterator());

        // Map each word to a (word, 1) pair.
        JavaPairRDD<String, Integer> pairs = words.mapToPair(word -> new Tuple2<>(word, 1));

        // Sum the counts for every distinct word.
        JavaPairRDD<String, Integer> counts = pairs.reduceByKey(Integer::sum);

        // Sort by key ONCE and reuse the result; the original called sortByKey() twice,
        // recomputing the range-partitioning shuffle for identical output.
        JavaPairRDD<String, Integer> sorted = counts.sortByKey();

        // Emit results through the logger. A lambda replaces the anonymous
        // VoidFunction class; parameterized logging avoids eager concatenation.
        sorted.foreach(t -> logger.warn("key:{},value:{}", t._1, t._2));

        // Emit the same results to stdout, preserving the original's duplicate output.
        sorted.foreach(t -> System.out.println("key:" + t._1 + ",value:" + t._2));
    }
}
