package org.wyz.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;

public class Join {
    // Input paths (user attributes, user exam records) and the output directory.
    private static final String[] ARGS = {"data/input/join/user.txt", "data/input/join/user-exam.txt", "data/output/join"};


    /**
     * Drops the id field and returns the remaining fields as a new list.
     *
     * @param idIndex index of the id field to remove
     * @param ss      the split record fields
     * @return all fields except the one at {@code idIndex}; the input array is not modified
     */
    public static List<String> withoutId(int idIndex, String[] ss) {
        List<String> res = new ArrayList<>(ss.length > 0 ? ss.length - 1 : 0);
        for (int i = 0; i < ss.length; i++) {
            if (i == idIndex) continue;
            res.add(ss[i]);
        }
        return res;
    }

    /**
     * Concatenates two lists into a single new list; neither input is modified.
     *
     * @param a first list
     * @param b second list
     * @return a new list containing all elements of {@code a} followed by all of {@code b}
     */
    public static List<String> mergeList(List<String> a, List<String> b) {
        List<String> res = new ArrayList<>(a.size() + b.size());
        res.addAll(a);
        res.addAll(b);
        return res;
    }

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("wyz-join");
        // try-with-resources guarantees the context is stopped even if a job throws
        // (JavaSparkContext implements Closeable; close() delegates to stop()).
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {

            // "id attr1 attr2" -> ("id", ["attr1", "attr2"])
            // Split on "\\s+": "\\s" matches only a single whitespace char, so runs of
            // spaces or tabs would otherwise inject empty fields into the record.
            JavaPairRDD<String, List<String>> rdd0 = sc.textFile(ARGS[0]).mapToPair(a -> {
                String[] ss = a.split("\\s+");
                return new Tuple2<>(ss[0], withoutId(0, ss));
            });

            JavaPairRDD<String, List<String>> rdd1 = sc.textFile(ARGS[1]).mapToPair(a -> {
                String[] ss = a.split("\\s+");
                return new Tuple2<>(ss[0], withoutId(0, ss));
            });

            // Inner join on id: ("id", (["attr1","attr2"], ["attr3","attr4"])),
            // then flatten the pair of lists into one merged attribute list.
            JavaPairRDD<String, List<String>> res = rdd0.join(rdd1)
                    .mapToPair(a -> new Tuple2<>(a._1, mergeList(a._2._1, a._2._2)));

            // Emit "id attr1 attr2 attr3 attr4" rather than the List.toString()
            // bracketed form "id [attr1, attr2, ...]".
            res.map(a -> a._1 + " " + String.join(" ", a._2)).saveAsTextFile(ARGS[2]);
        }
    }
}
