package com.catmiao.rdd.operate.part;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * @author ChengMiao
 * @title: Part_01
 * @projectName spark_study
 * @description: Partitioner demo — shows how reduceByKey redistributes records across partitions
 * @date 2024/11/25 16:27
 */
public class Part_01 {

    /**
     * Demonstrates how Spark distributes (key, value) records across partitions.
     *
     * Writes the RDD to disk before and after a {@code reduceByKey} so the
     * partition files under {@code output/part/} can be compared to see where
     * each record lands once the default partitioner has shuffled the data.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {

        final SparkConf conf = new SparkConf();
        conf.setAppName("appName");
        conf.setMaster("local[*]");

        // try-with-resources: the context is closed even if a Spark job throws.
        // The original leaked the context on any failure before jsc.close().
        try (final JavaSparkContext jsc = new JavaSparkContext(conf)) {

            List<Tuple2<String, Integer>> datas = new ArrayList<>();
            datas.add(new Tuple2<>("a", 1));
            datas.add(new Tuple2<>("a", 2));
            datas.add(new Tuple2<>("a", 3));
            datas.add(new Tuple2<>("a", 4));

            // Spread the four records over 3 partitions.
            JavaRDD<Tuple2<String, Integer>> rdd = jsc.parallelize(datas, 3);

            // Snapshot of the partition layout before any shuffle.
            rdd.saveAsTextFile("output/part/before");

            JavaPairRDD<String, Integer> pairRDD = rdd.mapToPair(
                    kv -> {
                        // Marker printed once per record to show when the map executes.
                        System.out.println("###############");
                        return kv;
                    }
            );

            JavaPairRDD<String, Integer> reduceRdd = pairRDD.reduceByKey(Integer::sum);

            /*
             * Partitioning rules:
             *  After the shuffle, the partition a record lands in is decided by
             *  Spark's internal partitioning; it tries to spread (hash) the data
             *  evenly, but not into exactly equal shares.
             *
             *  param1 the partitioner; when omitted, the default HashPartitioner
             *         is used. HashPartitioner.getPartition: given a key, returns
             *         a partition index starting from 0.
             *               logic: key.hashCode % partNum (hash modulo)
             *  param2 the per-key aggregation function
             */
            reduceRdd.saveAsTextFile("output/part/after");

            System.out.println("2计算完毕");
        }
    }
}




