package com.river.spark.mr;

import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

/**
 * @author riverfan
 */
/**
 * Demo of common Spark RDD transformations and actions — mapPartitions,
 * mapPartitionsWithIndex, union, intersection, subtract, distinct,
 * groupByKey, reduceByKey, sortByKey, join and cartesian — executed on a
 * {@code local[2]} master against a pipe-delimited student file.
 *
 * @author riverfan
 */
public class MapReduceActionDemon {

    /** Splits pipe-delimited records such as {@code "1|name|age"}. */
    private static final Splitter SPLITTER = Splitter.on('|');

    public static void main(String[] args) {

        SparkConf conf = new SparkConf()
                .setAppName("MapReduceActionDemon")
                .setMaster("local[2]");

        // try-with-resources guarantees the context is stopped even if a
        // demo throws; the original code leaked the SparkContext.
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {

            String studentFilePath = "/Users/riverfan/mytest/spark/mrAction/student.txt";

            mapPartitionsTest(sc, studentFilePath);
            // Uncomment individual demos as needed:
//            mapPartitionsWithIndexTest(sc, studentFilePath);
//            unionTest(sc, studentFilePath);
//            intersectionTest(sc, studentFilePath);
//            subtractTest(sc, studentFilePath);
//            distinctTest(sc, studentFilePath);
//            groupByKeyTest(sc, studentFilePath);
//            reduceByKeyTest(sc, studentFilePath);
//            sortByKeyTest(sc, studentFilePath);
//            joinTest(sc, studentFilePath);
//            cartesianTest(sc, studentFilePath);

            System.out.println("OK");
        }
    }

    /**
     * Prints every element of each partition tagged with the worker thread id.
     *
     * <p>The partition iterator is single-pass, so the elements are buffered
     * into a list before printing. Returning the already-consumed iterator
     * (as the original code did) would make the downstream {@code count()}
     * always return 0.
     */
    public static void mapPartitionsTest(JavaSparkContext sc, String studentFilePath) {
        sc.textFile(studentFilePath, 2)
                .mapPartitions(it -> {
                    List<String> buffered = new ArrayList<>();
                    it.forEachRemaining(buffered::add);
                    buffered.forEach(line ->
                            System.out.println(Thread.currentThread().getId() + "--> " + line));
                    // Return a fresh iterator so downstream operators still
                    // see the partition's elements.
                    return buffered.iterator();
                }).count();
    }

    /**
     * Like {@link #mapPartitionsTest} but also prints each partition's index.
     *
     * <p>Buffers the single-pass iterator for the same reason as above:
     * the original returned the exhausted iterator, yielding an empty RDD.
     */
    public static void mapPartitionsWithIndexTest(JavaSparkContext sc, String studentFilePath) {
        sc.textFile(studentFilePath, 2)
                .mapPartitionsWithIndex((index, it) -> {
                    System.out.println(Thread.currentThread().getId() + "--> index = " + index);
                    List<String> buffered = new ArrayList<>();
                    it.forEachRemaining(buffered::add);
                    buffered.forEach(line ->
                            System.out.println(Thread.currentThread().getId() + "--> " + line));
                    return buffered.iterator();
                }, false).count();
    }

    /** Appends a small in-memory RDD to the file's lines and prints the union. */
    public static void unionTest(JavaSparkContext sc, String studentFilePath) {
        JavaRDD<String> extra = sc.parallelize(Arrays.asList("hello", "river"));
        sc.textFile(studentFilePath, 2)
                .union(extra)
                .collect()
                .forEach(System.out::println);
    }

    /** Prints lines present both in the file and in a small in-memory RDD. */
    public static void intersectionTest(JavaSparkContext sc, String studentFilePath) {
        JavaRDD<String> probe = sc.parallelize(Arrays.asList("river", "river"));
        sc.textFile(studentFilePath)
                .intersection(probe)
                .collect()
                .forEach(System.out::println);
    }

    /** Prints the file's lines minus those occurring in a small in-memory RDD. */
    public static void subtractTest(JavaSparkContext sc, String studentFilePath) {
        JavaRDD<String> removed = sc.parallelize(Arrays.asList("3|李磊|22", "river"));
        sc.textFile(studentFilePath)
                .subtract(removed)
                .collect()
                .forEach(System.out::println);
    }

    /** Demonstrates {@code distinct()}: duplicates collapse to one element. */
    public static void distinctTest(JavaSparkContext sc, String studentFilePath) {
        sc.parallelize(Arrays.asList("river", "river", "frank"))
                .distinct()
                .foreach(t -> System.out.println(t));
    }

    /**
     * Gathers all the values of a given key and forms the list of values
     * from all the individual elements. The key is the text before the
     * first {@code '|'} of each line.
     *
     * @param sc              active Spark context
     * @param studentFilePath path to the pipe-delimited student file
     */
    public static void groupByKeyTest(JavaSparkContext sc, String studentFilePath) {
        sc.textFile(studentFilePath, 2)
                .mapToPair(t -> new Tuple2<>(t.substring(0, t.indexOf("|")), t))
                .groupByKey()
                .map(t -> {
                    // Printed on the executor thread (before collect).
                    System.out.println(Thread.currentThread().getId() + ":" + t);
                    return t;
                })
                .collect()
                // Printed on the driver thread (after collect).
                .forEach(t -> System.out.println(Thread.currentThread().getId() + ":" + t));
    }

    /** Concatenates all values sharing a key, joined with {@code '+'}. */
    public static void reduceByKeyTest(JavaSparkContext sc, String studentFilePath) {
        sc.parallelize(Arrays.asList("river|hello", "river|cat", "frank|moon"))
                .mapToPair(t -> new Tuple2<>(t.substring(0, t.indexOf("|")), t))
                .reduceByKey((t1, t2) -> t1 + "+" + t2)
                .collect()
                .forEach(System.out::println);
    }

    /** Sorts pairs by key in descending order across 2 partitions. */
    public static void sortByKeyTest(JavaSparkContext sc, String studentFilePath) {
        sc.parallelize(Arrays.asList("river|hello",
                "frank|cat1",
                "river|cat2",
                "frank|cat3",
                "river|cat4",
                "frank|cat5",
                "river|moon"))
                .mapToPair(t -> new Tuple2<>(t.substring(0, t.indexOf("|")), t))
                .sortByKey(false, 2)
                .collect()
                .forEach(System.out::println);
    }

    /**
     * Inner-joins the student file with an in-memory (id, remark) RDD on the
     * id field (text before the first {@code '|'}).
     */
    public static void joinTest(JavaSparkContext sc, String studentFilePath) {
        JavaPairRDD<String, String> remarks =
                sc.parallelize(Arrays.asList("1|good boy", "2|go on"))
                        .mapToPair(t -> {
                            List<String> fields = SPLITTER.splitToList(t);
                            return new Tuple2<>(fields.get(0), fields.get(1));
                        });
        sc.textFile(studentFilePath)
                .mapToPair(t -> {
                    List<String> fields = SPLITTER.splitToList(t);
                    return new Tuple2<>(fields.get(0), t);
                })
                .join(remarks)
                .collect()
                .forEach(System.out::println);
    }

    /** Cartesian product: every file line paired with every in-memory element. */
    public static void cartesianTest(JavaSparkContext sc, String studentFilePath) {
        JavaRDD<String> suffixes =
                sc.parallelize(Arrays.asList("good boy", "go on"));
        sc.textFile(studentFilePath)
                .cartesian(suffixes)
                .collect()
                .forEach(System.out::println);
    }

    /**
     * Classic word count over a whitespace-split text file, filtering out
     * blank tokens and the literal {@code ","}.
     */
    public static void wordCount2(JavaSparkContext sc, String filePath) {
        JavaPairRDD<String, Integer> counts = sc.textFile(filePath)
                .flatMap(s -> Arrays.asList(s.split(" ")).iterator())
                .mapToPair(s -> {
                    System.out.println(Thread.currentThread());
                    return new Tuple2<>(s, 1);
                })
                // isNotBlank is the single-argument form; the original's
                // varargs isNoneBlank worked but obscured the intent.
                .filter(t -> StringUtils.isNotBlank(t._1) && !StringUtils.equals(t._1, ","))
                .reduceByKey((v1, v2) -> (v1 + v2));
        counts.collect().forEach(t -> System.out.println(t + "  " + Thread.currentThread()));
    }
}
