package javademo;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
 * Project: spark-learn<br>
 * Package: javademo<br>
 * Purpose: Spark word-count demo<br>
 * Created: 2019-03-14<br>
 * Updated: 2019-03-14<br>
 *
 * @author lds (creator)<br>
 * @version v1.0<br>
 * @since jdk1.8
 */
public class WordCount {
    /**
     * Entry point: reads {@code README.md} with Spark, counts markdown-style
     * headings, then computes per-word counts two ways (reduceByKey and
     * countByValue) and prints both to stdout.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        // try-with-resources: JavaSparkContext implements Closeable, so the
        // context is stopped even if a job throws (the original leaked it).
        try (final JavaSparkContext context = SparkContextFactory.getContext("WordCount")) {
            final JavaRDD<String> wordRDD = context.textFile("README.md");

            // Count lines containing "####" (treated here as markdown headings).
            final long titleCount = wordRDD.filter(word -> word.contains("####")).count();
            System.out.println("标题数量："+titleCount);

            // Word count, variant 1: split on spaces, then map/reduce.
            // Cached because two separate actions below reuse this RDD.
            final JavaRDD<String> flatMapRDD = wordRDD
                    .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                    .persist(StorageLevel.MEMORY_ONLY());
            final JavaPairRDD<String, Integer> wordCountRDD = flatMapRDD
                    .mapToPair(word -> new Tuple2<>(word, 1))
                    .reduceByKey(Integer::sum); // method reference over (x, y) -> x + y

            // Word count, variant 2: countByValue collects the counts
            // directly to the driver as a Map.
            final Map<String, Long> countByValue = flatMapRDD.countByValue();
            System.out.println("================countByValue================");
            countByValue.forEach((key,value) -> System.out.println(key+":"+value));

            // Materialize variant 1 on the driver and print it.
            final List<Tuple2<String, Integer>> collect = wordCountRDD.collect();
            collect.forEach(System.out::println);
        }
    }
}
