package com.study.iceberg.flink;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Batch query of data stored in an Iceberg table (MinIO/S3A warehouse).
 */
public class BatchQuery {

    /**
     * Runs a one-shot SELECT over an Iceberg table whose warehouse lives on
     * MinIO (accessed through the Hadoop S3A filesystem) and prints the result.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tblEnv = StreamTableEnvironment.create(env);

        // S3A settings so the Hadoop filesystem layer can reach the local MinIO
        // instance (plain HTTP, path-style addressing, static credentials).
        // NOTE(review): hard-coded credentials/endpoint are fine for a local demo
        // but should come from external configuration in anything real.
        Configuration conf = tblEnv.getConfig().getConfiguration();
        conf.setString("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");
        conf.setString("fs.s3a.connection.ssl.enabled", "false");
        conf.setString("fs.s3a.endpoint", "http://127.0.0.1:9000");
        conf.setString("fs.s3a.access.key", "minioadmin");
        conf.setString("fs.s3a.secret.key", "minioadmin");
        conf.setString("fs.s3a.path.style.access", "true");
        conf.setString("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
        conf.setString("fs.s3a.fast.upload", "true");

        env.enableCheckpointing(1000);

        // BUGFIX: the Iceberg Flink connector expects the hyphenated key
        // 'catalog-type'; the underscored 'catalog_type' is not recognized.
        tblEnv.executeSql("create catalog hadoop_iceberg with (" +
                "'type'='iceberg'," +
                "'catalog-type'='hadoop'," +
                "'warehouse'='s3a://test/')");
        TableResult tableResult = tblEnv.executeSql(
                "select * from hadoop_iceberg.iceberg_db.user_info");
        tableResult.print();
    }

    /**
     * Sample of continuously (incrementally) reading the same Iceberg table as
     * an unbounded stream. Kept as a reference snippet; not invoked by main.
     */
    private void realTimeRead() {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tblEnv = StreamTableEnvironment.create(env);

        Configuration configuration = tblEnv.getConfig().getConfiguration();
        // Allow the OPTIONS(...) dynamic-table-options hint used in the query below.
        configuration.setBoolean("table.dynamic-table-options.enable", true);

        env.enableCheckpointing(1000);

        // BUGFIX: 'catalog-type' (hyphen), not 'catalog_type' — see main().
        tblEnv.executeSql("create catalog hadoop_iceberg with (" +
                "'type'='iceberg'," +
                "'catalog-type'='hadoop'," +
                "'warehouse'='s3a://test/')");
        // Streaming read: https://iceberg.apache.org/docs/latest/flink-queries/#flink-streaming-read
        TableResult tableResult = tblEnv.executeSql(
                "SELECT * FROM hadoop_iceberg.iceberg_db.user_info /*+ OPTIONS('streaming'='true', 'monitor-interval'='1s')*/");
        tableResult.print();

        // Incremental streaming read starting from a known snapshot id:
        // TableResult tableResult2 = tblEnv.executeSql(
        //         "SELECT * FROM hadoop_iceberg.iceberg_db.user_info /*+ OPTIONS('streaming'='true', 'monitor-interval'='1s', 'start-snapshot-id'='3821550127947089987')*/ ;");
        // tableResult2.print();
    }
}
