package com.zxj;

import org.apache.hadoop.fs.*;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.regex.Pattern;

public class InvertedIndex {

    /** Words are separated by single spaces; compiled once (reused across all lines). */
    private static final Pattern SPACE = Pattern.compile(" ");

    /**
     * Builds an inverted index over every file directly under the directory
     * given as {@code args[0]} (non-recursive listing).
     *
     * <p>Each output line has the form
     * {@code word:{(fileName,count),(fileName,count),...}} with the file
     * entries sorted by file name and the lines sorted by word. The result is
     * printed to stdout and also written to {@code <args[0]>/out/result}.
     *
     * @param args args[0] — path of the input directory (HDFS or local)
     * @throws IOException if listing the directory or writing the result fails
     */
    public static void main(String[] args) throws IOException {

        if (args.length < 1) {
            System.err.println("请输入文件路径");
            System.exit(1);
        }

        String inputDir = args[0];

        SparkSession spark = SparkSession
                .builder()
                .appName("InvertedIndex-zxj")
                .getOrCreate();

        SparkContext sparkContext = spark.sparkContext();
        JavaSparkContext jsc = new JavaSparkContext(sparkContext);

        // (fileName, word) pairs accumulated across every file in the input directory.
        JavaRDD<Tuple2<String, String>> fileWordPairs = jsc.emptyRDD();

        FileSystem fs = FileSystem.get(sparkContext.hadoopConfiguration());

        RemoteIterator<LocatedFileStatus> fileList = fs.listFiles(new Path(inputDir), false);

        while (fileList.hasNext()) {
            Path path = fileList.next().getPath();
            String fileName = path.getName();
            RDD<Tuple2<String, String>> rdd = jsc.textFile(path.toString())
                    .flatMap(line -> Arrays.asList(SPACE.split(line)).iterator())
                    // Strip Chinese curly quotes so quoted words count as the bare word.
                    .map(token -> token.replace("“", "").replace("”", ""))
                    // Runs of spaces (and stripped standalone quotes) yield empty
                    // tokens — do not index them as the word "".
                    .filter(token -> !token.isEmpty())
                    .mapToPair(token -> new Tuple2<>(fileName, token)).rdd();
            fileWordPairs = fileWordPairs.union(rdd.toJavaRDD());
        }

        // ((fileName, word), count) — per-file word frequencies.
        JavaPairRDD<Tuple2<String, String>, Integer> counts = fileWordPairs
                .mapToPair(pair -> new Tuple2<>(pair, 1))
                .reduceByKey(Integer::sum);

        // (word, "{(fileName,count),(fileName,count),...}")
        // NOTE: sorting must happen INSIDE each group — a sortByKey before the
        // shuffle of reduceByKey/groupByKey does not survive the repartition,
        // so the previous "sort then concatenate" approach was non-deterministic.
        JavaPairRDD<String, String> inverted = counts
                .mapToPair(vo -> new Tuple2<>(vo._1()._2(),
                        "(" + vo._1()._1() + "," + vo._2() + ")"))
                .groupByKey()
                .mapValues(entries -> {
                    List<String> parts = new ArrayList<>();
                    entries.forEach(parts::add);
                    Collections.sort(parts); // "(fileName,count)" sorts by file name
                    return "{" + String.join(",", parts) + "}";
                })
                .sortByKey(); // sort the words themselves

        List<Tuple2<String, String>> list = inverted.collect();

        // Print the index to stdout.
        for (Tuple2<String, String> tuple2 : list) {
            System.out.println(tuple2._1() + ":" + tuple2._2());
        }

        // Persist to <inputDir>/out/result. Explicit UTF-8 bytes: writeBytes()
        // keeps only the low byte of each char and would corrupt non-ASCII
        // words. try-with-resources closes (and flushes) the stream even if a
        // write fails.
        Path resPath = new Path(inputDir + "/out/result");
        try (FSDataOutputStream os = fs.create(resPath)) {
            for (Tuple2<String, String> tuple2 : list) {
                os.write((tuple2._1() + ":" + tuple2._2() + "\t\n")
                        .getBytes(StandardCharsets.UTF_8));
            }
        }

        spark.stop();
    }
}
