package com.ry.flink.jobx;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.bridge.java.StreamStatementSet;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 *  6. Demonstrates declaring a source table inline with {@code TableDescriptor}.
 * */
public class Job6 {
    /**
     * Builds a bounded 100-row "datagen" source table via an inline {@code TableDescriptor}
     * (instead of SQL DDL), bridges it to a DataStream, keys it by {@code uid}, and prints
     * each row's payload to stdout.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // The datagen source below is bounded (number-of-rows is set), so batch mode applies.
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Declare the source table programmatically with a TableDescriptor.
        Table table = tableEnv.from(TableDescriptor.forConnector("datagen")
                .option("number-of-rows", "100") // emit exactly 100 rows, then finish
                .schema(Schema.newBuilder()
                        .column("uid", DataTypes.TINYINT())
                        .column("payload", DataTypes.STRING())
                        .build())
                .build());

        // Bridge the Table to a DataStream, key by uid, and print each payload.
        // executeAndCollect() triggers job execution; no separate env.execute() is needed.
        tableEnv.toDataStream(table)
                .keyBy(r -> r.getFieldAs("uid"))
                .map(r -> "data :" + r.getFieldAs("payload"))
                .executeAndCollect()
                .forEachRemaining(System.out::println);
    }
}
