package com.flink.paimon;

import org.apache.paimon.catalog.Catalog;
import org.apache.paimon.catalog.Identifier;
import org.apache.paimon.data.BinaryString;
import org.apache.paimon.data.GenericRow;
import org.apache.paimon.data.NestedRow;
import org.apache.paimon.data.Timestamp;
import org.apache.paimon.flink.FlinkCatalogFactory;
import org.apache.paimon.options.Options;
import org.apache.paimon.table.Table;
import org.apache.paimon.table.sink.CommitMessage;
import org.apache.paimon.table.sink.StreamTableCommit;
import org.apache.paimon.table.sink.StreamTableWrite;
import org.apache.paimon.table.sink.StreamWriteBuilder;

import java.util.*;

/**
 * StreamInsert
 *
 * <p>Demonstrates streaming writes to a Paimon table via the Paimon Java API:
 * obtain the table from a catalog, build a {@link StreamWriteBuilder}, write
 * rows, prepare a commit, commit it, and optionally retry the commit with
 * {@code filterAndCommit} after a failure.
 *
 * @author caizhiyang
 * @since 2024-04-19
 */
public class StreamInsert {

    /**
     * Streaming insert into a Paimon table using the Paimon Java API.
     *
     * @param args unused command-line arguments
     * @throws Exception if catalog/table lookup, writing, or committing fails
     */
    public static void main(String[] args) throws Exception {
        System.setProperty("HADOOP_USER_NAME", "root");
        Identifier identifier = Identifier.create("default", "dwd_paimon_test2");
        Options catalogOptions = new Options();
        catalogOptions.set("warehouse", "hdfs://172.0.107.57:8082/paimon/main");
        Catalog.Loader catalogLoader =
                () -> FlinkCatalogFactory.createPaimonCatalog(catalogOptions);

        // Catalog, StreamTableWrite and StreamTableCommit are AutoCloseable;
        // use try-with-resources so their resources are released even when a
        // write or commit fails (the original leaked all three).
        try (Catalog catalog = catalogLoader.load()) {
            // Fetch the target Paimon table.
            Table table = catalog.getTable(identifier);

            // Builder shared by the write side and the commit side.
            StreamWriteBuilder writeBuilder = table.newStreamWriteBuilder();

            // 2. Write records in distributed tasks.
            //    The commit object is created ONCE, outside the loop, and
            //    reused for each checkpoint (the original re-created it per
            //    iteration).
            try (StreamTableWrite write = writeBuilder.newWrite();
                 StreamTableCommit commit = writeBuilder.newCommit()) {

                // commitIdentifier works like a Flink checkpointId. The SAME
                // value must be used for prepareCommit and commit of one batch;
                // the original incremented it in between, so the commit (and
                // the filterAndCommit retry) used a mismatched identifier.
                long commitIdentifier = 0;

                boolean running = true;
                while (running) {
                    GenericRow record1 = GenericRow.of(21, BinaryString.fromString("张三21"), Timestamp.now());
                    GenericRow record2 = GenericRow.of(22, BinaryString.fromString("李四22"), Timestamp.now());
                    GenericRow record3 = GenericRow.of(23, BinaryString.fromString("王五23"), Timestamp.now());
                    write.write(record1);
                    write.write(record2);
                    write.write(record3);
                    List<CommitMessage> messages = write.prepareCommit(false, commitIdentifier);

                    // 3. Collect all CommitMessages to a global node and commit.
                    commit.commit(commitIdentifier, messages);

                    // 4. When a failure occurs and you're not sure whether the
                    //    commit process succeeded, use `filterAndCommit` to
                    //    retry it. Succeeded commits are skipped automatically.
                    //    The map is keyed by the same identifier committed above.
                    Map<Long, List<CommitMessage>> commitIdentifiersAndMessages = new HashMap<>();
                    commitIdentifiersAndMessages.put(commitIdentifier, messages);
                    commit.filterAndCommit(commitIdentifiersAndMessages);

                    // Advance to the next checkpoint identifier only after the
                    // current batch has been fully committed.
                    commitIdentifier++;

                    Thread.sleep(1000);
                    running = false; // demo: run a single iteration
                }
            }
        }
    }
}
