package com.jscloud.bigdata;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;

public class JavaLambdaWordCount {

        // Input path used when no command-line argument is supplied.
        private static final String DEFAULT_INPUT = "hdfs://bigdata01:8020/hello.txt";

        /**
         * Word-count driver: reads a text file, counts the occurrences of each
         * space-separated word, and prints the words sorted by count in
         * descending order.
         *
         * @param args optional; {@code args[0]} overrides the default HDFS input path
         */
        public static void main(String[] args) {
                // Allow the input file to be supplied on the command line,
                // falling back to the original hardcoded path.
                String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;

                SparkConf sparkConf = new SparkConf()
                        .setAppName("lambdaCount")
                        .setMaster("local[*]");

                // try-with-resources guarantees the context is stopped even if
                // the job throws (JavaSparkContext implements Closeable).
                try (JavaSparkContext sparkContext = new JavaSparkContext(sparkConf)) {
                        // Reduce log noise.
                        sparkContext.setLogLevel("WARN");

                        // Load the input data.
                        JavaRDD<String> lines = sparkContext.textFile(inputPath);

                        // Split each line on single spaces into individual words.
                        JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator());

                        // Pair each word with an initial count of 1.
                        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(word -> new Tuple2<>(word, 1));

                        // Sum the counts per word.
                        JavaPairRDD<String, Integer> wordAndTotal = wordAndOne.reduceByKey(Integer::sum);

                        // sortByKey sorts only by key, so swap to (count, word),
                        // sort descending (false = descending), then swap back.
                        JavaPairRDD<String, Integer> sortedResult = wordAndTotal
                                .mapToPair(Tuple2::swap)
                                .sortByKey(false)
                                .mapToPair(Tuple2::swap);

                        List<Tuple2<String, Integer>> collect = sortedResult.collect();
                        for (Tuple2<String, Integer> wordCount : collect) {
                                System.out.println(wordCount._1 +  ",  出现次数：" +wordCount._2);
                        }
                }
        }
}
