package com.study.iceberg.flink;

import java.io.IOException;

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.RewriteDataFilesActionResult;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.flink.actions.Actions;
import org.apache.iceberg.hadoop.HadoopCatalog;

/**
 * Compacts small data files of an Iceberg table through the table-object API,
 * using the Flink-based rewrite action and a Hadoop catalog rooted at an S3 path.
 *
 * @author xxx
 */
public class RewriteDataFiles {
    public static void main(String[] args) throws IOException {
        // 1. Configure the catalog. HadoopCatalog implements Closeable (it holds
        // Hadoop FileSystem resources), so open it in try-with-resources.
        Configuration hadoopConf = new Configuration();
        try (HadoopCatalog hadoopCatalog = new HadoopCatalog(hadoopConf, "s3a://test/")) {

            // 2. Load the table object; it exposes metadata access and maintenance APIs.
            Table table = hadoopCatalog.loadTable(TableIdentifier.of("iceberg_db", "user_Info"));

            // Inspect table metadata. currentSnapshot() returns null for a table
            // that has no snapshots yet, so guard before dereferencing.
            System.out.println(table.history());
            if (table.currentSnapshot() != null) {
                System.out.println(table.currentSnapshot().snapshotId());
            }

            // 3. Compact small files. targetSizeInBytes sets the desired size of the
            // rewritten output files (default 512 MB = 536870912L); 1024L here is a
            // tiny demo value so the compaction is observable on small test data.
            RewriteDataFilesActionResult result = Actions.forTable(table)
                    .rewriteDataFiles()
                    .targetSizeInBytes(1024L)
                    .execute();
            System.out.println(result);
        }
    }
}
