package com.codejiwei.flink.table;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.*;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.CloseableIterator;

/**
 * author: codejiwei
 * date: 2023/8/9
 * desc: flink table api with batch mode
 **/
public class Flink_Table_API_BatchMode {

    /**
     * Builds a bounded "datagen" table source, converts it to a DataStream,
     * applies a custom map operator, and prints the results locally.
     *
     * @param args unused command-line arguments
     * @throws Exception if job submission or result collection fails
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Batch execution is selected via EnvironmentSettings.inBatchMode() below,
        // so calling env.setRuntimeMode(RuntimeExecutionMode.BATCH) would be redundant.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, EnvironmentSettings.inBatchMode());

        // Bounded source: "number-of-rows" = 10 makes the datagen connector finite,
        // so the job terminates on its own (required for batch semantics).
        Table table = tEnv.from(TableDescriptor.forConnector("datagen")
                .option("number-of-rows", "10")
                .schema(Schema.newBuilder()
                        .column("uid", DataTypes.TINYINT())
                        .column("payload", DataTypes.STRING())
                        .build())
                .build());

        // executeAndCollect() returns a CloseableIterator backed by a running job;
        // try-with-resources guarantees the job is cancelled and resources released
        // even if iteration throws (the original leaked this iterator).
        try (CloseableIterator<String> results = tEnv.toDataStream(table)
                .keyBy(r -> r.<Byte>getFieldAs("uid"))
                .map(r -> "My custom operator: " + r.<String>getFieldAs("payload"))
                .executeAndCollect()) {
            results.forEachRemaining(System.out::println);
        }
    }
}
