package david.java.flink_sql.streamApiIntegration;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Description: Demonstrates running the DataStream/Table API in batch runtime mode
 * using the DataGen table source.
 * @see <a href="https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/dev/table/data_stream_api/#batch-runtime-mode">Flink batch runtime mode docs</a>
 *
 * The following example shows how to play around with batch mode using the DataGen table source.
 * Many sources offer options that implicitly make the connector bounded, for example, by defining
 * a terminating offset or timestamp. In this example, we limit the number of rows with the
 * {@code number-of-rows} option.
 * @Author: ZhaoDawei
 * @Date: Created at 10:24 AM, 2022/3/25
 */
public class S2_BatchMode {
    /**
     * Builds a bounded datagen table, converts it to a DataStream, and prints the
     * transformed rows. Batch runtime mode requires every source to be bounded.
     *
     * @param args unused command-line arguments
     * @throws Exception if job execution fails
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Switch the runtime to batch execution; all sources must be bounded in this mode.
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        Table table =
                tableEnv.from(
                        TableDescriptor.forConnector("datagen")
                        // Datagen is unbounded by default; "number-of-rows" makes it a
                        // bounded source, which BATCH mode requires. This option was
                        // described in the class comment but missing from the code.
                        .option("number-of-rows", "10")
                        .schema(
                                Schema.newBuilder()
                                .column("uid", DataTypes.TINYINT())
                                .column("payload", DataTypes.STRING())
                                .build())
                        .build());

        // Convert the Table to a DataStream, key by uid, apply a custom map operator,
        // then collect the results back to the client and print them.
        tableEnv.toDataStream(table)
                .keyBy(r -> r.<Byte>getFieldAs("uid"))
                .map(r -> "My custom operator: " + r.<String>getFieldAs("payload"))
                .executeAndCollect()
                .forEachRemaining(System.out::println);
    }
}
