package org.geek.spark.test;

import org.apache.commons.lang.StringUtils;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Builds an inverted index (word -> list of file IDs, optionally with per-file
 * term frequency) over all text files in the input directory given as args[0].
 *
 * Output format, printed to stdout for each distinct word:
 *   "word": {fileA,fileB,...}              (index without frequencies)
 *   "word": {(fileA,countA),(fileB,countB)} (index with frequencies)
 */
public class InvertedIndex {

    public static void main(String[] args) {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 1) {
            System.err.println("Usage: InvertedIndex <input-path>");
            System.exit(1);
        }

        // 1. Create the Spark session (local master for standalone runs).
        SparkSession spark = SparkSession
                .builder()
                .master("local")
                .appName("InvertedIndex")
                .getOrCreate();

        // 2. Java-friendly wrapper around the underlying SparkContext.
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

        // 3. (full file path, full file contents) — one record per input file.
        JavaPairRDD<String, String> fileNameLinesRDD = jsc.wholeTextFiles(args[0]);

        // 4. Flatten each file into (word, fileId) pairs. The file ID is the
        //    bare file name (everything after the last '/' of the path).
        JavaPairRDD<String, String> wordFileNameRDD = fileNameLinesRDD.flatMapToPair(
                (PairFlatMapFunction<Tuple2<String, String>, String, String>) pathAndContent -> {
                    String path = pathAndContent._1();
                    String fileName = path.substring(path.lastIndexOf('/') + 1);
                    return tokenize(pathAndContent._2()).stream()
                            .map(word -> new Tuple2<>(word, fileName))
                            .collect(Collectors.toList())
                            .iterator();
                });

        // 5. Inverted index without term frequency: distinct (word, file) pairs,
        //    grouped by word, file IDs joined with commas, sorted by word.
        JavaPairRDD<String, String> wordFileNamesPairs = wordFileNameRDD
                .distinct()
                .groupByKey()
                .mapToPair(wordAndFiles -> new Tuple2<>(
                        wordAndFiles._1,
                        StringUtils.join(wordAndFiles._2.iterator(), ',')))
                .sortByKey();
        for (Tuple2<String, String> pair : wordFileNamesPairs.collect()) {
            System.out.printf("\"%s\": {%s}%n", pair._1, pair._2);
        }

        // 6. ((word, fileId), occurrenceCount) — count every occurrence of a
        //    word within each file.
        JavaPairRDD<Tuple2<String, String>, Integer> wordFileNameCountPairs = wordFileNameRDD
                .mapToPair(wordFileNamePair -> new Tuple2<>(wordFileNamePair, 1))
                .reduceByKey(Integer::sum);

        // 7. Re-key to (word, (fileId, occurrenceCount)) so counts can be
        //    grouped per word.
        JavaPairRDD<String, Tuple2<String, Integer>> wordCountPerFileNamePairs = wordFileNameCountPairs
                .mapToPair(entry -> new Tuple2<>(
                        entry._1._1, new Tuple2<>(entry._1._2, entry._2)));

        // 8. Inverted index with term frequency: group the (fileId, count)
        //    tuples per word, join them with commas, sorted by word.
        JavaPairRDD<String, String> result = wordCountPerFileNamePairs
                .groupByKey()
                .mapToPair(wordAndCounts -> new Tuple2<>(
                        wordAndCounts._1,
                        StringUtils.join(wordAndCounts._2.iterator(), ',')))
                .sortByKey();
        for (Tuple2<String, String> pair : result.collect()) {
            System.out.printf("\"%s\": {%s}%n", pair._1, pair._2);
        }

        // 9. Shut down the Spark session (also stops the wrapped context).
        spark.close();
    }

    /**
     * Splits raw file contents into words.
     *
     * Lines are separated on CR/LF and words on single spaces. Blank tokens —
     * produced by empty lines, CRLF line endings, or consecutive spaces — are
     * skipped so the empty string is never indexed as a keyword (a bug in the
     * original split-based flattening).
     *
     * @param content the full text of one input file
     * @return all non-empty words, in order of appearance
     */
    private static List<String> tokenize(String content) {
        List<String> words = new ArrayList<>();
        for (String line : content.split("[\r\n]", -1)) {
            for (String word : line.split(" ")) {
                if (!word.isEmpty()) {
                    words.add(word);
                }
            }
        }
        return words;
    }

}