package com.huawei.spark;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

/**
 * spark-submit --class com.huawei.spark.SparkWordCount --master yarn --deploy-mode client /root/SparkWorkCount-0.0.1-SNAPSHOT.jar /user/FinalExam/word.txt /user/spark_output
 */
public class SparkWordCount {

    /**
     * Entry point: reads a text file, counts occurrences of each
     * space-separated word, and saves the (word, count) pairs as text.
     *
     * @param args args[0] = input path, args[1] = output path (must not already exist)
     */
    public static void main(String[] args) {
        // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: SparkWordCount <inputPath> <outputPath>");
            System.exit(1);
        }

        // NOTE(review): the hard-coded "local" master overrides any --master flag
        // passed to spark-submit (e.g. yarn, as the class Javadoc suggests).
        // Kept as-is to preserve existing behavior — confirm before deploying.
        JavaSparkContext javaSparkContext = new JavaSparkContext("local", "SparkWordCount");
        try {
            // Read the input file; each RDD element is one line of text.
            JavaRDD<String> textFile = javaSparkContext.textFile(args[0]);
            // Split each line on single spaces and flatten into a stream of words.
            JavaRDD<String> words = textFile.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
            // Map each word to a (word, 1L) pair.
            JavaPairRDD<String, Long> wordAndOne = words.mapToPair(word -> new Tuple2<>(word, 1L));
            // Sum the counts per key to get the total per word.
            JavaPairRDD<String, Long> result = wordAndOne.reduceByKey(Long::sum);

            result.saveAsTextFile(args[1]);
        } finally {
            // Always release the SparkContext, even if the job throws.
            javaSparkContext.stop();
        }
    }
}
