package com.sub.spark.core.rdd.operate;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Demonstrates common single-value RDD operations: map, flatMap, filter,
 * distinct, groupBy, and sortBy.
 *
 * @author Submerge
 */
public class RDDMethod {

    public static void main(String[] args) throws InterruptedException {
        SparkConf local = new SparkConf().setAppName("Spark-Method-Map").setMaster("local[2]");

        // try-with-resources: JavaSparkContext is Closeable, so the context is
        // released even if one of the demo methods throws (the original leaked
        // the context in that case because close() was only reached on success).
        try (JavaSparkContext jsc = new JavaSparkContext(local)) {
            System.out.println("=============");

            // map
            map_Method(jsc);
            System.out.println("=============");

            // flatMap
            flatMap_Method(jsc);
            System.out.println("=============");

            // filter
            filter_Method(jsc);
            System.out.println("=============");

            // distinct
            distinct_Method(jsc);
            System.out.println("=============");

            // group by
            groupBy_Method(jsc);
            System.out.println("=============");

            // sort by
            sortBy_Method(jsc);
            System.out.println("=============");

            // Keep the driver alive so the Spark UI at localhost:4040/jobs/
            // can be inspected before the context shuts down.
            Thread.sleep(60000);
        }
    }


    /**
     * RDD {@code map}: applies a function to every element, producing a new RDD
     * of the same or another type. Here each string is upper-cased and printed.
     *
     * @param jsc the active Spark context used to create the demo RDD
     */
    public static void map_Method(JavaSparkContext jsc) {
        JavaRDD<String> parallelizeRDD = jsc.parallelize(Arrays.asList("hello", "spark", "big"));

        JavaRDD<String> mapRDD = parallelizeRDD.map(String::toUpperCase);

        mapRDD.collect().forEach(System.out::println);
    }

    /**
     * RDD {@code flatMap}: maps each element to zero or more output elements and
     * flattens the results into a single RDD. Here each nested list of integers
     * is expanded into its (doubled) elements.
     *
     * @param jsc the active Spark context used to create the demo RDD
     */
    public static void flatMap_Method(JavaSparkContext jsc) {
        List<List<Integer>> listList = new ArrayList<>();
        List<Integer> numList1 = Arrays.asList(1, 2);
        List<Integer> numList2 = Arrays.asList(3, 4);
        listList.add(numList1);
        listList.add(numList2);

        JavaRDD<List<Integer>> parallelize = jsc.parallelize(listList);

        // flatMap expects an Iterator of output elements per input element
        JavaRDD<Integer> flatMap = parallelize.flatMap(lists -> {
            List<Integer> newList = new ArrayList<>();
            lists.forEach(data -> newList.add(data * 2));
            return newList.iterator();
        });

        flatMap.collect().forEach(System.out::println);
    }


    /**
     * RDD {@code filter}: keeps only the elements matching a predicate.
     * Here only strings starting with {@code "h"} are retained.
     *
     * @param jsc the active Spark context used to create the demo RDD
     */
    public static void filter_Method(JavaSparkContext jsc) {
        JavaRDD<String> parallelizeRDD = jsc.parallelize(Arrays.asList("hello", "spark", "big", "hi"));

        JavaRDD<String> filterRDD = parallelizeRDD.filter(s -> s.startsWith("h"));

        filterRDD.collect().forEach(System.out::println);
    }


    /**
     * RDD {@code distinct}: removes duplicate elements, here shuffling into
     * 2 partitions.
     *
     * @param jsc the active Spark context used to create the demo RDD
     */
    public static void distinct_Method(JavaSparkContext jsc) {
        JavaRDD<String> parallelizeRDD = jsc.parallelize(Arrays.asList("spark", "hello", "spark", "big", "hello"));

        JavaRDD<String> distinctRDD = parallelizeRDD.distinct(2);

        distinctRDD.collect().forEach(System.out::println);
    }


    /**
     * RDD {@code groupBy}: groups elements by a key function (identity here),
     * yielding a pair RDD of key to all elements with that key.
     *
     * @param jsc the active Spark context used to create the demo RDD
     */
    public static void groupBy_Method(JavaSparkContext jsc) {
        JavaRDD<String> parallelizeRDD = jsc.parallelize(Arrays.asList("spark", "hello", "spark", "big", "hello"));

        JavaPairRDD<String, Iterable<String>> groupByRDD = parallelizeRDD.groupBy(s -> s);
        groupByRDD.collect().forEach(System.out::println);
    }

    /**
     * RDD {@code sortBy}: sorts elements by a key function (identity here),
     * ascending, into 2 partitions.
     *
     * @param jsc the active Spark context used to create the demo RDD
     */
    public static void sortBy_Method(JavaSparkContext jsc) {
        JavaRDD<Integer> parallelizeRDD = jsc.parallelize(Arrays.asList(3, 4, 5, 8, 1, 2, 7, 6));
        parallelizeRDD.sortBy(data -> data, true, 2).collect().forEach(System.out::println);
    }

}
