package com.study.iceberg.minio;

import org.apache.iceberg.Snapshot;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.hadoop.HadoopCatalog;

import java.io.IOException;

/**
 * @author mayjean
 */
/**
 * Table-maintenance job for an Iceberg table stored in MinIO: compacts small
 * data files and expires old snapshots so the compacted-away files can
 * actually be deleted.
 *
 * @author mayjean
 */
public class UnionDelData extends Base {

    public static void main(String[] args) throws Exception {
        new UnionDelData().process();
    }

    /**
     * Loads the target table from the MinIO-backed Hadoop catalog and runs the
     * compaction and snapshot-expiration steps. Does nothing if the table does
     * not exist.
     *
     * @throws IOException if the catalog cannot be closed cleanly
     */
    private void process() throws IOException {
        // MinIO bucket path used as the Iceberg warehouse root.
        final String warehousePath = "s3a://test/";

        // HadoopCatalog holds FileIO/Hadoop resources and implements Closeable,
        // so close it deterministically with try-with-resources.
        try (HadoopCatalog catalog = new HadoopCatalog(getConfiguration(), warehousePath)) {
            // Iceberg database and table name.
            TableIdentifier name = TableIdentifier.parse("iceberg_db.txt_file");

            // Only maintain a table that already exists; otherwise there is
            // nothing to compact or expire.
            if (!catalog.tableExists(name)) {
                return;
            }
            Table table = catalog.loadTable(name);

            unionDataFile(table, 2);
            // Expire snapshots older than one day, but always keep the last 6.
            deleteSnap(table, 24 * 60 * 60 * 1000L, 6);
        }
    }

    /**
     * Compacts small data files into larger ones via the Flink rewrite action.
     *
     * <p>NOTE: rewriteDataFiles only writes the merged files — the old small
     * files remain referenced by earlier snapshots and are not removed until
     * those snapshots are expired (see {@link #deleteSnap}), so the file count
     * temporarily increases after this step.
     *
     * @param table       the Iceberg table to compact
     * @param parallelism maximum number of parallel rewrite tasks
     */
    public void unionDataFile(Table table, int parallelism) {
//        org.apache.iceberg.flink.actions.Actions.forTable(table)
//                .rewriteDataFiles()
//                .maxParallelism(parallelism)
//                .caseSensitive(false)
//                // merge any file smaller than 1024 bytes
//                .targetSizeInBytes(1024L)
//                .execute();
    }

    /**
     * Expires snapshots older than {@code retainTimeMillis} before the current
     * snapshot's timestamp, deleting the data/metadata files they reference,
     * while always retaining the most recent {@code retainLastNum} snapshots.
     *
     * @param table            the Iceberg table whose snapshots are expired
     * @param retainTimeMillis retention window in milliseconds, measured back
     *                         from the current snapshot's timestamp
     * @param retainLastNum    minimum number of most-recent snapshots to keep
     */
    public void deleteSnap(Table table, long retainTimeMillis, int retainLastNum) {
        Snapshot snapshot = table.currentSnapshot();
        if (snapshot == null) {
            // A table that has never been written to has no current snapshot;
            // calling timestampMillis() on it would throw an NPE.
            return;
        }
        long expireBefore = snapshot.timestampMillis() - retainTimeMillis;
        table.expireSnapshots()
                .expireOlderThan(expireBefore)
                .cleanExpiredFiles(true)
                .retainLast(retainLastNum)
                .commit();
    }
}
