package com.ky.pro.big.data.spark.word.index;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class Main {

    /** Splits input lines on single spaces; compiled once and reused. */
    private static final Pattern SPACE = Pattern.compile(" ");

    /** Default number of input files ("data/0.dat" .. "data/2.dat"). */
    private static final int DEFAULT_FILE_COUNT = 3;

    /**
     * Builds an inverted index with per-file word frequencies over a set of
     * data files and prints each word with its list of (fileNo, count) pairs.
     *
     * @param args optional: args[0] may override the number of input files
     *             to read (defaults to {@link #DEFAULT_FILE_COUNT})
     * @throws Exception if Spark fails to read the input files
     */
    public static void main(String[] args) throws Exception {

        // Generalization: file count may be supplied as args[0]; default keeps
        // the original behavior (3 files).
        int fileCount = args.length > 0 ? Integer.parseInt(args[0]) : DEFAULT_FILE_COUNT;

        SparkConf conf = new SparkConf().setAppName("Word Count").setMaster("local[*]");

        // FIX: the original never stopped the SparkContext (resource leak).
        // JavaSparkContext is Closeable, so try-with-resources releases it.
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {

            JavaPairRDD<Tuple2<String, Integer>, Integer> ones = null;

            // Read each data file and tag every word with its file number,
            // producing ((word, fileNo), 1) pairs across all files.
            for (int i = 0; i < fileCount; i++) {

                JavaRDD<String> lines = sc.textFile("data/" + i + ".dat");

                JavaRDD<String> words = lines.flatMap(s -> Arrays.asList(SPACE.split(s)).iterator());

                final int fileNameNo = i; // must be effectively final for the lambda
                JavaPairRDD<Tuple2<String, Integer>, Integer> tmp = words.mapToPair(
                        s -> new Tuple2<>(new Tuple2<>(s, fileNameNo), 1));
                ones = (ones == null) ? tmp : ones.union(tmp);
            }

            // Robustness: with fileCount == 0 there is nothing to index.
            if (ones == null) {
                System.out.println("No input files to index.");
                return;
            }

            // Sum occurrences per (word, fileNo), then re-key each entry as
            // word -> (fileNo, count) so the index can be grouped by word.
            JavaPairRDD<String, Tuple2<Integer, Integer>> counts = ones
                    .reduceByKey(Integer::sum)
                    .mapToPair(k -> new Tuple2<>(k._1._1, new Tuple2<>(k._1._2, k._2)));

            // Group by word to form the inverted index, then print each entry.
            List<Tuple2<String, Iterable<Tuple2<Integer, Integer>>>> output =
                    counts.groupByKey().collect();

            for (Tuple2<?, ?> tuple : output) {
                System.out.println(tuple._1() + ": " + tuple._2());
            }
        }
    }

}
