package week07;
import static java.lang.System.out;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CalcInvertedIndexByRDD {

    /** Base directory that holds the input documents and the output index. */
    private static final String BASE_DIR = "d:/Temp/Spark/";

    /**
     * Builds an inverted index over three documents (A.txt, B.txt, C.txt):
     * for every word, a list of (documentId, countInDocument) pairs, then
     * saves the result as a text file.
     *
     * @param args unused
     * @throws IOException declared for the (currently commented-out) console read
     */
    public static void main(String[] args) throws IOException {
        SparkConf conf = new SparkConf().setAppName("sunzihao-calc-inverted-index-01");
        // try-with-resources: JavaSparkContext is Closeable and must be stopped
        // even if a job fails; the original code leaked it.
        try (JavaSparkContext ctx = new JavaSparkContext(conf)) {
            JavaPairRDD<String, List<Tuple2<String, Integer>>> countsAOfDoc = countWordsOfDoc(ctx, "A");
            out.println(countsAOfDoc.collect());

            JavaPairRDD<String, List<Tuple2<String, Integer>>> countsBOfDoc = countWordsOfDoc(ctx, "B");
            out.println(countsBOfDoc.collect());

            JavaPairRDD<String, List<Tuple2<String, Integer>>> countsCOfDoc = countWordsOfDoc(ctx, "C");
            out.println(countsCOfDoc.collect());

            // Concatenate the three per-document postings and merge the lists per word.
            JavaPairRDD<String, List<Tuple2<String, Integer>>> allCountsOfDoc =
                    countsAOfDoc.union(countsBOfDoc).union(countsCOfDoc);
            JavaPairRDD<String, List<Tuple2<String, Integer>>> allIndex =
                    allCountsOfDoc.reduceByKey((l1, l2) -> {
                        // Build a fresh list instead of mutating an input: reduce
                        // functions must be side-effect-free to be safe to re-run.
                        List<Tuple2<String, Integer>> merged = new ArrayList<>(l1);
                        merged.addAll(l2);
                        return merged;
                    });
            out.println(allIndex.collect());

            allIndex.saveAsTextFile(BASE_DIR + "Idx.txt");
            //System.in.read();
        }
    }

    /**
     * Counts word occurrences in {@code BASE_DIR + docId + ".txt"} and tags each
     * count with the document id, yielding (word, [(docId, count)]) pairs that are
     * ready to be merged into the inverted index.
     *
     * @param ctx   the active Spark context
     * @param docId single-letter document id; also the input file's base name
     * @return pair RDD of word to a one-element, mutable posting list
     */
    private static JavaPairRDD<String, List<Tuple2<String, Integer>>> countWordsOfDoc(
            JavaSparkContext ctx, String docId) {
        JavaRDD<String> lines = ctx.textFile(BASE_DIR + docId + ".txt");
        // split(" ", -1) keeps trailing empty tokens — preserved from the original
        // (empty strings are counted as "words" when lines have extra spaces).
        JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ", -1)).iterator());
        // Integer::sum replaces the redundant (int) casts of the original.
        JavaPairRDD<String, Integer> counts =
                words.mapToPair(w -> new Tuple2<>(w, 1)).reduceByKey(Integer::sum);
        // Wrap each count in a mutable single-element list so per-document lists
        // can be concatenated later when building the final index.
        return counts.mapValues(v -> new ArrayList<>(Arrays.asList(new Tuple2<>(docId, v))));
    }
}
