package com.flink.paimon;

import com.google.common.collect.Lists;
import org.apache.paimon.catalog.Catalog;
import org.apache.paimon.catalog.Identifier;
import org.apache.paimon.data.InternalRow;
import org.apache.paimon.flink.FlinkCatalogFactory;
import org.apache.paimon.options.Options;
import org.apache.paimon.predicate.Predicate;
import org.apache.paimon.predicate.PredicateBuilder;
import org.apache.paimon.reader.RecordReader;
import org.apache.paimon.table.Table;
import org.apache.paimon.table.source.ReadBuilder;
import org.apache.paimon.table.source.Split;
import org.apache.paimon.table.source.StreamTableScan;
import org.apache.paimon.table.source.TableRead;
import org.apache.paimon.types.DataTypes;
import org.apache.paimon.types.RowType;

import java.util.List;

/**
 * BatchRead — batch-read a Paimon table via the Paimon Java API.
 *
 * @author caizhiyang
 * @since 2024-04-19
 */
public class BatchRead {

    /**
     * Batch-reads the Paimon table {@code default.remote_table} via the Paimon Java API,
     * keeping only rows where column 0 is non-null and column 1 is {@code >= 0.0}, and
     * printing each projected row to stdout.
     *
     * @param args unused command-line arguments
     * @throws Exception if the catalog/table cannot be opened or the read fails
     */
    public static void main(String[] args) throws Exception {
        // Impersonate the "hadoop" user for HDFS access; must be set before any FS call.
        System.setProperty("HADOOP_USER_NAME", "hadoop");
        Identifier identifier = Identifier.create("default", "remote_table");
        Options catalogOptions = new Options();
        catalogOptions.set("warehouse", "hdfs://node1:8020/paimon/fs");
        Catalog.Loader catalogLoader =
                () -> FlinkCatalogFactory.createPaimonCatalog(catalogOptions);

        // Catalog is AutoCloseable — close it to release filesystem connections.
        try (Catalog catalog = catalogLoader.load()) {
            // Look up the Paimon table.
            Table table = catalog.getTable(identifier);

            // Build filter predicates against the (BIGINT, DOUBLE) row type.
            PredicateBuilder builder =
                    new PredicateBuilder(RowType.of(DataTypes.BIGINT(), DataTypes.DOUBLE()));
            // Column 0 must be non-null.
            Predicate notNull = builder.isNotNull(0);
            // Column 1 must be >= 0.0.
            Predicate greaterOrEqual = builder.greaterOrEqual(1, 0.0);

            // Project only the first two columns.
            int[] projection = new int[] {0, 1};

            ReadBuilder readBuilder =
                    table.newReadBuilder()
                            .withProjection(projection)
                            .withFilter(Lists.newArrayList(notNull, greaterOrEqual));

            // 2. Plan splits on the 'Coordinator' (a.k.a. 'Driver') side.
            //    newScan() creates a batch scan.
            List<Split> splits = readBuilder.newScan().plan().splits();

            // 3. In a real job these splits would be distributed across tasks.

            // 4. Read the splits in a task. RecordReader is Closeable — use
            //    try-with-resources so the underlying files are always released.
            TableRead read = readBuilder.newRead();
            try (RecordReader<InternalRow> reader = read.createReader(splits)) {
                reader.forEachRemaining((row) -> {
                    long orderId = row.getLong(0);
                    double price = row.getDouble(1);
                    System.out.println(String.format("%s - %s", orderId, price));
                });
            }
        }
    }
}
