package com.flink.paimon;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;

/**
 * FlinkStreamRead
 *
 * @author caizhiyang
 * @since 2024-04-19
 */
public class FlinkStreamRead {

    /**
     * Reads the {@code paimon_test} table from a Paimon catalog using the Flink
     * Table API (Flink SQL) and prints every row of the resulting changelog stream.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if building or executing the Flink job fails
     */
    public static void main(String[] args) throws Exception {
        // Act as HDFS user "root" when accessing the warehouse (non-Kerberos setup).
        System.setProperty("HADOOP_USER_NAME", "root");

        // Create environments of both the DataStream and Table APIs.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // BATCH runtime mode: perform a bounded scan of the table and terminate,
        // instead of tailing the table's changelog indefinitely.
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Register the Paimon catalog (warehouse on HDFS) and make it the current catalog.
        tableEnv.executeSql("CREATE CATALOG paimon WITH ('type' = 'paimon', 'warehouse'='hdfs://172.0.107.57:8082/paimon/main')");
        tableEnv.executeSql("USE CATALOG paimon");

        // Query the table and convert the result into a changelog DataStream;
        // each Row carries its RowKind (+I / -U / +U / -D).
        Table table = tableEnv.sqlQuery("SELECT * FROM paimon_test");
        DataStream<Row> dataStream = tableEnv.toChangelogStream(table);

        // Execute the pipeline and print each row; Row.toString() already
        // includes the RowKind prefix, so no manual formatting is needed.
        dataStream.executeAndCollect().forEachRemaining(System.out::println);
    }
}
