package com.gjy.learning.java;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;

public class WordCount {

    /**
     * Classic Spark word-count demo: reads a local text file, prints a few basic
     * statistics (line count, first line, lines containing "hello"), then counts
     * occurrences of every word and prints the results.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("WordCount")
                .setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Ensure the context is stopped even if any action below throws.
        try {
            // 1. Read the src/main/resources/wordcount.txt file.
            JavaRDD<String> textFile = sc.textFile("src/main/resources/wordcount.txt");

            // 2. Count the number of lines in the file.
            long count = textFile.count();
            System.out.println("⽂件⾏数：" + count);

            // 3. Get the first line of the file.
            String firstLine = textFile.first();
            System.out.println("⽂件的第⼀⾏：" + firstLine);

            // 4./5. Lines containing the word "hello".
            // Build the filtered RDD once and reuse it for both the collect and
            // the count — the original evaluated the same filter twice.
            JavaRDD<String> helloLines = textFile.filter(line -> line.contains("hello"));
            List<String> linesWithHello = helloLines.collect();
            System.out.println("包含hello这个单词的⾏ " + linesWithHello);
            long countWithHello = helloLines.count();
            System.out.println("包含hello这个单词的⾏数：" + countWithHello);

            // 6. Split the file contents into individual words.
            JavaRDD<String> wordsRDD =
                    textFile.flatMap(line -> Arrays.asList(line.split(" ")).iterator());

            // 7. Map each word to a (word, 1) pair.
            JavaPairRDD<String, Integer> pairRDD =
                    wordsRDD.mapToPair(word -> new Tuple2<>(word, 1));

            // 8. Sum the counts per word.
            JavaPairRDD<String, Integer> retRDD = pairRDD.reduceByKey(Integer::sum);

            // 9. Print each word with its count. collect() brings the (small)
            // result to the driver first; RDD.foreach would execute the println
            // on the executors, whose stdout does not reach the driver console
            // outside local mode.
            retRDD.collect().forEach(retPair ->
                    System.out.println(retPair._1 + "\t" + retPair._2));
        } finally {
            sc.stop();
        }
    }
}
