package com.java.learn.wordcount;

import com.sun.org.apache.xpath.internal.operations.And;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

/**
 * @Copyright: Shanghai Definesys Company.All rights reserved.
 * @Description: 使用lambda表达式编写wordcount
 * @author: chuhaitao
 * @since: 2019/2/17 22:08
 * @history: 1.2019/2/17 created by chuhaitao
 */
public class JavaLambabaWordCount {

    /**
     * Word count on Spark using Java 8 lambda expressions: reads a text file,
     * splits lines on spaces, counts occurrences per word, sorts by count
     * descending, and writes the (word, count) pairs to an output directory.
     *
     * @param args args[0] = input file path, args[1] = output directory path;
     *             local test paths are used only when no args are supplied.
     */
    public static void main(String[] args) {
        // Fall back to local test paths only when no arguments were given,
        // instead of unconditionally clobbering real command-line arguments.
        if (args == null || args.length < 2) {
            args = new String[]{"D:\\tmp\\log.txt", "D:\\tmp\\log3.txt"};
        }

        SparkConf conf = new SparkConf();
        conf.setAppName("JavaLambabaWordCount");
        conf.setMaster("local");

        // try-with-resources guarantees the context is closed even when a
        // Spark action throws (the original jsc.close() was skipped on error).
        try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
            final JavaRDD<String> lines = jsc.textFile(args[0]);

            // One element per whitespace-separated word.
            final JavaRDD<String> words =
                    lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator());

            // Pair every word with an initial count of 1.
            final JavaPairRDD<String, Integer> wordsAndOne =
                    words.mapToPair(word -> new Tuple2<>(word, 1));

            // Sum the counts for each distinct word.
            final JavaPairRDD<String, Integer> reduced =
                    wordsAndOne.reduceByKey(Integer::sum);

            // Swap to (count, word) so sortByKey orders by frequency, descending.
            final JavaPairRDD<Integer, String> sortedByCount =
                    reduced.mapToPair(Tuple2::swap).sortByKey(false);

            // Swap back to (word, count) for the output format.
            final JavaPairRDD<String, Integer> result =
                    sortedByCount.mapToPair(Tuple2::swap);

            result.saveAsTextFile(args[1]);
        }
    }
}
