package com.hhf.rrd.transformation;

import com.hhf.rrd.utils.MysqlUtils;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.MapPartitionFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.util.Collector;

import java.sql.Connection;
import java.util.ArrayList;
import java.util.List;

/**
 * Demonstrates batch processing with {@code mapPartition} versus {@code map}.
 *
 * <p>{@code mapPartition} is invoked once per parallel partition; the whole partition is
 * handed in through the given iterable. The number of elements each function instance
 * sees is non-deterministic and depends on the operator's parallelism. It cannot be
 * applied to a single element — for that, use the {@code map} or {@code flatMap} operator.
 *
 * @author huanghaifeng15
 * @date 2022/2/11 12:57
 **/
public class MapPartitionApp {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        List<String> list = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            list.add("student:" + i);
        }

        // NOTE(review): fromCollection produces a non-parallel source; confirm that
        // setParallelism(5) is accepted here — downstream operators inherit the
        // parallelism in any case.
        DataSource<String> data = env.fromCollection(list).setParallelism(5);
        System.out.println("====" + data.getParallelism());

        data.mapPartition(new MapPartitionFunction<String, String>() {
            @Override
            public void mapPartition(Iterable<String> values, Collector<String> out) throws Exception {
                // One connection per PARTITION — this is exactly why mapPartition is
                // preferred for expensive per-record setup such as JDBC connections.
                // try-with-resources closes the connection (it was leaked before).
                try (Connection connection = MysqlUtils.getConnection()) {
                    System.out.println("connection: " + connection.getSchema());

                    // TODO... business logic

                }
            }
        }).print();

        data.map(new MapFunction<String, String>() {

            /**
             * Assume each incoming record must be joined against the business DB:
             * with a plain map, a new connection is opened for EVERY record.
             */
            @Override
            public String map(String value) throws Exception {
                // Close the per-record connection too (it was leaked before).
                try (Connection connection = MysqlUtils.getConnection()) {
                    System.out.println("connection: " + connection.getSchema());

                    // TODO... business logic

                    return value;
                }
            }
        }).print(); // fix: without a sink this operator was never executed in the DataSet API
    }
}
