package com.catmiao.rdd.operate.transform;

import com.google.common.collect.Lists;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;

/**
 * Demonstrates the {@code sortByKey} transformation plus the {@code coalesce}
 * and {@code repartition} partition-count transformations on a JavaPairRDD.
 *
 * @author ChengMiao
 * @title: Transfer_15_SortByKey
 * @projectName spark_study
 * @description: sortByKey / coalesce / repartition demo
 * @date 2024/11/25 16:27
 */
public class Transfer_15_SortByKey {

    public static void main(String[] args) throws InterruptedException {

        final SparkConf conf = new SparkConf();
        conf.setAppName("appName");
        conf.setMaster("local[*]");

        final JavaSparkContext jsc = new JavaSparkContext(conf);

        // Sample (key, value) pairs with duplicate keys, deliberately unsorted.
        ArrayList<Tuple2<String, Integer>> datas = Lists.newArrayList(
                new Tuple2<String, Integer>("a", 1),
                new Tuple2<String, Integer>("b", 2),
                new Tuple2<String, Integer>("a", 3),
                new Tuple2<String, Integer>("b", 4)
        );

        JavaRDD<Tuple2<String, Integer>> rdd = jsc.parallelize(datas);

        // Identity mapping only converts JavaRDD<Tuple2<K,V>> into a JavaPairRDD<K,V>.
        JavaPairRDD<String, Integer> pairRDD = rdd.mapToPair(t -> t);

        /**
         * sortByKey: sorts the RDD by key.
         *  - false => descending order (true would be ascending)
         *
         * Precondition: the key type must implement the Comparable interface.
         */
        JavaPairRDD<String, Integer> sortRdd = pairRDD.sortByKey(false);

        /**
         * coalesce: merges partitions. It can only SHRINK the partition count
         * unless shuffle is enabled.
         *  - second parameter true => allows a shuffle, which permits growing
         *    the partition count as well.
         *
         * NOTE: RDDs are immutable — transformations return a NEW RDD, so the
         * result must be captured; calling coalesce without using the return
         * value is a no-op.
         */
        JavaPairRDD<String, Integer> coalescedRdd = sortRdd.coalesce(1);
        System.out.println("after coalesce(1): " + coalescedRdd.getNumPartitions() + " partition(s)");

        /**
         * repartition: resets the partition count (always shuffles), so it can
         * grow the number of partitions. Equivalent to coalesce(n, true).
         */
        JavaPairRDD<String, Integer> repartitionedRdd = coalescedRdd.repartition(2);
        System.out.println("after repartition(2): " + repartitionedRdd.getNumPartitions() + " partition(s)");

        // Collect from sortRdd (not the repartitioned RDD) because repartition's
        // shuffle does not preserve the sort order produced by sortByKey.
        sortRdd.collect().forEach(System.out::println);

        jsc.close();


    }
}
