package com.study.iceberg.flink;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.flink.CatalogLoader;
import org.apache.iceberg.flink.TableLoader;
import org.apache.hadoop.conf.Configuration;
import org.apache.flink.table.data.RowData;
import org.apache.iceberg.flink.source.FlinkSource;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.iceberg.hadoop.HadoopCatalog;

import java.util.HashMap;
import java.util.Map;

/**
 * Reads Iceberg table data with the Flink DataStream API, either as a bounded
 * batch or as a continuous stream (toggle via {@code streaming(...)}).
 *
 * @author xxx
 */
public class StreamAPIReadIceberg {

    /** MinIO bucket path used as the Hadoop catalog warehouse root. */
    private static final String WAREHOUSE_PATH = "s3a://test/";

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Hadoop configuration so the S3A filesystem can reach the MinIO-backed warehouse.
        Configuration conf = s3aConfiguration();

        // Load the table once via a HadoopCatalog: the Table instance supplies the
        // schema to the source builder, while the TableLoader below is what the
        // Flink source uses at runtime to (re)open the table on each task.
        TableIdentifier identifier = TableIdentifier.parse("iceberg_db.user_info");
        Catalog catalog = new HadoopCatalog(conf, WAREHOUSE_PATH);
        Table table = catalog.loadTable(identifier);

        Map<String, String> catalogProps = new HashMap<>();
        catalogProps.put("warehouse", WAREHOUSE_PATH);
        CatalogLoader catalogLoader = CatalogLoader.hadoop(WAREHOUSE_PATH, conf, catalogProps);
        TableLoader tableLoader = TableLoader.fromCatalog(catalogLoader, identifier);

        DataStream<RowData> batchData = FlinkSource.forRowData()
                .env(env)
                .tableLoader(tableLoader)
                .table(table)
                // false = bounded batch read of the current snapshot;
                // set to true for continuous incremental (streaming) reads.
                .streaming(false)
                // For incremental reads, start from a given snapshot id:
                // .startSnapshotId(0L)
                .build();

        // Print each row; RowData's toString is used as the mapped value.
        batchData.map(new MapFunction<RowData, String>() {
                    @Override
                    public String map(RowData rowData) throws Exception {
                        System.out.println(rowData);
                        return rowData.toString();
                    }
                })
                .print();

        env.execute("DataStream API read data from iceberg");
    }

    /**
     * Builds a Hadoop {@link Configuration} pointing the S3A filesystem at a
     * local MinIO instance (plain HTTP, path-style access).
     *
     * <p>NOTE(review): endpoint and credentials are hard-coded for this demo —
     * move them to external configuration or environment variables for real use.
     *
     * @return configuration with all {@code fs.s3a.*} properties set
     */
    private static Configuration s3aConfiguration() {
        Configuration conf = new Configuration();
        conf.set("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");
        conf.set("fs.s3a.connection.ssl.enabled", "false");
        conf.set("fs.s3a.endpoint", "http://127.0.0.1:9000");
        conf.set("fs.s3a.access.key", "minioadmin");
        conf.set("fs.s3a.secret.key", "minioadmin");
        conf.set("fs.s3a.path.style.access", "true");
        conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
        conf.set("fs.s3a.fast.upload", "true");
        return conf;
    }
}
