package com.flink.paimon;


import org.apache.paimon.catalog.Catalog;
import org.apache.paimon.catalog.CatalogContext;
import org.apache.paimon.catalog.CatalogFactory;
import org.apache.paimon.catalog.Identifier;
import org.apache.paimon.data.InternalRow;
import org.apache.paimon.flink.FlinkCatalogFactory;
import org.apache.paimon.options.Options;
import org.apache.paimon.reader.RecordReader;
import org.apache.paimon.schema.Schema;
import org.apache.paimon.table.Table;
import org.apache.paimon.table.source.ReadBuilder;
import org.apache.paimon.table.source.TableRead;
import org.apache.paimon.types.DataTypes;
import org.apache.paimon.table.source.Split;

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Test2
 *
 * @author caizhiyang
 * @since 2024-05-15
 */
/**
 * Smoke-test harness for Apache Paimon catalog operations: creating and
 * dropping tables, and reading a system table ("files") through the batch
 * read API.
 *
 * @author caizhiyang
 * @since 2024-05-15
 */
public class Test2 {

    /** HDFS warehouse root shared by every catalog built in this class. */
    private static final String WAREHOUSE = "hdfs://172.0.107.57:8082/paimon/main";

    public static void main(String[] args) throws Exception {
        // Read the "files" system table of dwd_paimon_kafka and print the
        // record-count / file-size columns of the last row.
        findSysTable("dwd_paimon_kafka" + Catalog.SYSTEM_TABLE_SPLITTER + "files");
    }

    /** Catalog options pointing at the shared HDFS warehouse. */
    private static Options warehouseOptions() {
        Options catalogOptions = new Options();
        catalogOptions.set("warehouse", WAREHOUSE);
        return catalogOptions;
    }

    /**
     * Creates {@code default.remote_table7} (order_id BIGINT PK, price DOUBLE)
     * via the Flink catalog factory.
     *
     * @throws Exception if the catalog cannot be loaded or the table already
     *     exists ({@code ignoreIfExists} is {@code false})
     */
    public static void createTable1() throws Exception {
        System.setProperty("HADOOP_USER_NAME", "root");
        Schema.Builder schemaBuilder = Schema.newBuilder();
        schemaBuilder.primaryKey("order_id");
        schemaBuilder.column("order_id", DataTypes.BIGINT());
        schemaBuilder.column("price", DataTypes.DOUBLE());
        Schema schema = schemaBuilder.build();

        Identifier identifier = Identifier.create("default", "remote_table7");
        Catalog.Loader catalogLoader =
                () -> FlinkCatalogFactory.createPaimonCatalog(warehouseOptions());
        // try-with-resources: the original leaked the catalog when createTable threw.
        try (Catalog catalog = catalogLoader.load()) {
            catalog.createTable(identifier, schema, false);
        }
    }

    /**
     * Creates {@code default.remote_table8} (same schema as {@link #createTable1})
     * via the generic {@link CatalogFactory} instead of the Flink factory.
     *
     * @throws Exception if the catalog cannot be created or the table already exists
     */
    public static void createTable2() throws Exception {
        System.setProperty("HADOOP_USER_NAME", "root");
        Schema.Builder schemaBuilder = Schema.newBuilder();
        schemaBuilder.primaryKey("order_id");
        schemaBuilder.column("order_id", DataTypes.BIGINT());
        schemaBuilder.column("price", DataTypes.DOUBLE());
        Schema schema = schemaBuilder.build();

        Identifier identifier = Identifier.create("default", "remote_table8");
        CatalogContext context = CatalogContext.create(warehouseOptions());
        try (Catalog catalog = CatalogFactory.createCatalog(context)) {
            catalog.createTable(identifier, schema, false);
        }
    }

    /**
     * Drops {@code default.<tableName>}, ignoring a missing table.
     *
     * @param tableName table name inside the {@code default} database
     * @throws Exception if the catalog cannot be created or the drop fails
     */
    public static void dropTable(String tableName) throws Exception {
        System.setProperty("HADOOP_USER_NAME", "root");
        Identifier identifier = Identifier.create("default", tableName);
        CatalogContext context = CatalogContext.create(warehouseOptions());
        // Original left catalog.close() commented out — leaked the catalog.
        try (Catalog catalog = CatalogFactory.createCatalog(context)) {
            catalog.dropTable(identifier, true);
        }
    }

    /**
     * Batch-reads a (system) table and prints columns 6 and 7 of its last row.
     *
     * <p>For the {@code $files} system table these are the record count and the
     * file size — NOTE(review): confirm against the files-table schema of the
     * Paimon version in use.
     *
     * @param tableName table name (may include {@link Catalog#SYSTEM_TABLE_SPLITTER})
     * @throws Exception if the table is missing or the read fails
     */
    public static void findSysTable(String tableName) throws Exception {
        System.setProperty("HADOOP_USER_NAME", "root");
        Identifier identifier = Identifier.create("default", tableName);
        Catalog.Loader catalogLoader =
                () -> FlinkCatalogFactory.createPaimonCatalog(warehouseOptions());
        try (Catalog catalog = catalogLoader.load()) {
            // Resolve the Paimon table behind the identifier.
            Table table = catalog.getTable(identifier);
            ReadBuilder readBuilder = table.newReadBuilder();

            // Plan splits on the coordinator side; newScan() creates a batch scan.
            List<Split> splits = readBuilder.newScan().plan().splits();

            // Read every split in this task, remembering only the last row.
            TableRead read = readBuilder.newRead();
            AtomicReference<InternalRow> lastRecord = new AtomicReference<>();
            // RecordReader is Closeable — the original never closed it.
            try (RecordReader<InternalRow> reader = read.createReader(splits)) {
                reader.forEachRemaining(lastRecord::set);
            }

            InternalRow row = lastRecord.get();
            if (row == null) {
                // Original code threw NullPointerException on an empty table.
                System.out.println("no rows in " + tableName);
                return;
            }
            long recordCount = row.getLong(6);
            long recordSize = row.getLong(7);
            System.out.println(recordCount);
            System.out.println(recordSize);
        }
    }
}
