package com.abyss.transformation;

import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.PartitionOperator;
import org.apache.flink.api.java.tuple.Tuple3;

/**
 * Demonstrates sorting records within each partition of a DataSet.
 *
 * <p>The tuples are first hash-partitioned on their first field, then every
 * partition is sorted locally on the second field (descending) with the third
 * field (descending) as a secondary sort key, and finally printed.
 */
public class SortPartitionDemo {
    public static void main(String[] args) throws Exception {
        // Obtain the batch execution environment.
        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Build an in-memory source of (word, group, count) tuples.
        DataSource<Tuple3<String, String, Integer>> input = env.fromElements(
                Tuple3.of("hadoop", "a", 11),
                Tuple3.of("hadoop", "a", 21),
                Tuple3.of("hadoop", "b", 16),
                Tuple3.of("hive", "b", 13),
                Tuple3.of("hadoop", "a", 3),
                Tuple3.of("hive", "a", 31),
                Tuple3.of("hive", "a", 21),
                Tuple3.of("hive", "b", 11),
                Tuple3.of("hive", "a", 15),
                Tuple3.of("hive", "b", 19),
                Tuple3.of("spark", "a", 51),
                Tuple3.of("spark", "b", 61),
                Tuple3.of("spark", "a", 19),
                Tuple3.of("spark", "b", 35),
                Tuple3.of("spark", "a", 66),
                Tuple3.of("spark", "b", 76),
                Tuple3.of("flink", "a", 11),
                Tuple3.of("flink", "b", 51),
                Tuple3.of("flink", "a", 31)
        );

        // Hash-partition on field 0 so tuples with the same first field land
        // in the same partition.
        PartitionOperator<Tuple3<String, String, Integer>> byFirstField =
                input.partitionByHash(0);

        // Within each partition: sort on field 1 (descending), then on
        // field 2 (descending) as the secondary key, and print the result.
        byFirstField
                .sortPartition(1, Order.DESCENDING)
                .sortPartition(2, Order.DESCENDING)
                .print();
    }
}
