package com.atguigu.bigdata.spark.core.rdd.part;

import org.apache.spark.Partitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;

public class Spark01_RDD_Part_JAVA {
    public static void main(String[] args) {
        // Demonstrates applying a custom partitioner (MyPartioner) to a pair RDD.

        // Build a local Spark context using all available cores.
        SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkCore");
        JavaSparkContext context = new JavaSparkContext(sparkConf);

        // Sample (league, value) pairs to be distributed across partitions by key.
        List<Tuple2<String, String>> pairs = new ArrayList<>();
        pairs.add(new Tuple2<>("NBA", "11111"));
        pairs.add(new Tuple2<>("NBA", "11222"));
        pairs.add(new Tuple2<>("CBA", "2222"));
        pairs.add(new Tuple2<>("WNBA", "3333"));

        JavaPairRDD<String, String> pairRdd = context.parallelizePairs(pairs);

        // Redistribute records by key using the custom partitioner, then persist
        // one output file per partition under the "output" directory.
        JavaPairRDD<String, String> partitioned = pairRdd.partitionBy(new MyPartioner());
        partitioned.saveAsTextFile("output");

        context.stop();
    }

}

/**
 * Custom partitioner that routes records into 3 partitions by key:
 * "NBA" -> partition 0, "CBA" -> partition 1, everything else
 * (including "WNBA" and null keys) -> partition 2.
 */
class  MyPartioner extends Partitioner {

    @Override
    public int numPartitions() {
        return 3;
    }

    @Override
    public int getPartition(Object key) {
        // Compare constant-first: Spark may hand this method a null key,
        // and key.equals(...) would throw a NullPointerException.
        if("NBA".equals(key)) {
            return 0;
        } else if("CBA".equals(key)) {
            return 1;
        }
        // Fallback bucket for all other keys (e.g. "WNBA") and null.
        return 2;
    }

    // equals/hashCode let Spark recognize two instances as equivalent and
    // skip an unnecessary shuffle when RDDs already share this partitioner.
    @Override
    public boolean equals(Object obj) {
        return obj instanceof MyPartioner;
    }

    @Override
    public int hashCode() {
        return MyPartioner.class.hashCode();
    }
}
