package com.zzl.java;

import org.apache.spark.Partitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import scala.actors.threadpool.Arrays;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

/**
 * Demonstrates custom partitioning with {@code partitionBy}: keys below 100 are
 * routed to partition 0, all others to partition 1. Partition contents are
 * dumped to stdout before and after repartitioning.
 */
public class PartitionerByOperator {

    public static void main(String[] args) {

        // Must be set BEFORE the Spark/Hadoop machinery starts; the original
        // code set it after creating the context, which is too late for the
        // winutils lookup on Windows.
        System.setProperty("hadoop.home.dir", "E:\\hadoop");

        SparkConf conf = new SparkConf().setMaster("local").setAppName("PartitionerByOperator");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            // Build an RDD with 2 partitions from a local collection.
            // Fully qualified java.util.Arrays is used on purpose: the file
            // imports the deprecated scala.actors.threadpool.Arrays, which
            // would otherwise shadow it.
            List<Integer> list = java.util.Arrays.asList(99, 88, 200, 20, 101, 102);
            JavaRDD<Integer> rdd = sc.parallelize(list, 2);

            // BUG FIX: the original lambdas drained the iterator while printing
            // and then returned the exhausted iterator, so count() saw empty
            // partitions. dumpPartition buffers the elements and returns a
            // fresh iterator instead.
            JavaRDD<Integer> integerJavaRDD =
                    rdd.mapPartitionsWithIndex(PartitionerByOperator::dumpPartition, false);
            integerJavaRDD.count();

            // Repartition by key with a custom Partitioner:
            // keys < 100 -> partition 0, keys >= 100 -> partition 1.
            JavaPairRDD<Integer, Integer> rdd1 = rdd.mapToPair(a -> new Tuple2<>(a, a));
            JavaPairRDD<Integer, Integer> by = rdd1.partitionBy(new Partitioner() {
                @Override
                public int getPartition(Object key) {
                    int k = (Integer) key;
                    return k < 100 ? 0 : 1;
                }

                @Override
                public int numPartitions() {
                    return 2;
                }
            });

            // Drop back to the plain keys and dump the new partition layout.
            JavaRDD<Integer> map = by.map(a -> a._1);
            map.mapPartitionsWithIndex(PartitionerByOperator::dumpPartition, false).count();
        } finally {
            // stop() alone is sufficient: JavaSparkContext.close() simply
            // delegates to stop(), so the original stop()+close() was redundant.
            sc.stop();
        }
    }

    /**
     * Prints every element of one partition tagged with its partition id, then
     * returns a fresh iterator over the same elements so downstream operators
     * (e.g. count) still see the data.
     *
     * @param partitionId index of the partition being processed
     * @param elements    iterator over the partition's elements (consumed here)
     * @return a new iterator over the buffered elements
     */
    private static Iterator<Integer> dumpPartition(Integer partitionId, Iterator<Integer> elements) {
        System.out.println("partitionId:" + partitionId);
        List<Integer> buffered = new ArrayList<>();
        while (elements.hasNext()) {
            Integer value = elements.next();
            System.out.println(value + "\t");
            buffered.add(value);
        }
        return buffered.iterator();
    }
}
