package com.yifeng.repo.flink.data.transport;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.data.RowData;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.flink.TableLoader;
import org.apache.iceberg.flink.source.FlinkSource;

/**
 * Reads Iceberg table data with the Flink DataStream API, in batch or streaming mode.
 */
public class StreamAPIReadIceberg {

    /**
     * Entry point: streams rows out of an Iceberg table on HDFS and prints them
     * as "id,data" strings. Runs until cancelled because the source is opened
     * in streaming (incremental) mode.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        // For local testing only: impersonate the "uflink" HDFS user.
        System.setProperty("HADOOP_USER_NAME", "uflink");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 1. Configure a TableLoader pointing at the Iceberg table location on HDFS.
        Configuration hadoopConf = new Configuration();
        TableLoader tableLoader = TableLoader.fromHadoopTable(
                "hdfs://192.168.253.132:8020/warehouse/tablespace/managed/iceberg/sample2",
                hadoopConf);

        // 2. Read from Iceberg. streaming(false) (the default) does a one-shot
        //    batch read; streaming(true) keeps the source open and incrementally
        //    consumes newly committed snapshots.
        DataStream<RowData> rowStream = FlinkSource.forRowData()
                .env(env)
                .tableLoader(tableLoader)
                .streaming(true)
                .build();

        // 3. Project each row to "id,data" and print to stdout.
        //    NOTE(review): assumes column 0 is an int id and column 1 a string
        //    payload — confirm against the table schema.
        rowStream.map(new MapFunction<RowData, String>() {
            @Override
            public String map(RowData rowData) throws Exception {
                int id = rowData.getInt(0);
                String data = rowData.getString(1).toString();
                // Removed dead debug code that fetched column 3 for id >= 6300
                // and discarded it; it only risked a failure on narrower rows.
                return id + "," + data;
            }
        }).print();

        env.execute("ReadIceberg");
    }
}
