package com.study.iceberg.minio;

import com.google.common.collect.ImmutableMap;
import org.apache.iceberg.*;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.IcebergGenerics;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.parquet.GenericParquetWriter;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.hadoop.HadoopCatalog;
import org.apache.iceberg.io.*;
import org.apache.iceberg.parquet.Parquet;
//import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.types.Types;

import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.UUID;

/**
 * Examples of working with an Iceberg table stored in MinIO through a
 * {@link HadoopCatalog}: file-level scans, row-level scans, schema evolution,
 * and appending a hand-built Parquet data file.
 *
 * @author mayjean
 */
public class QueryData extends Base {

    public static void main(String[] args) throws IOException {
        new QueryData().process();
    }

    /**
     * Loads the {@code iceberg_db.user_info} table from the MinIO-backed
     * warehouse and runs a row-level scan on it; returns silently when the
     * table does not exist.
     *
     * @throws IOException if catalog/table I/O fails or a scan cannot be closed
     */
    private void process() throws IOException {
        // MinIO bucket path used as the Iceberg warehouse root.
        final String warehousePath = "s3a://test/";
        // FIX: HadoopCatalog is Closeable — try-with-resources releases its
        // underlying FileIO/FileSystem handles instead of leaking them.
        try (HadoopCatalog catalog = new HadoopCatalog(getConfiguration(), warehousePath)) {
            // Iceberg database name and table name.
            TableIdentifier name = TableIdentifier.parse("iceberg_db.user_info");
            // Nothing to query if the table has not been created yet.
            if (!catalog.tableExists(name)) {
                return;
            }

            Table table = catalog.loadTable(name);
//            scanningOnFileLevel(table);
            scanningOnRowLevel(table);
//            updateOperations(table);
//            writeAppendData(table);
        }
    }

    /**
     * Plans a filtered file-level scan and prints every data file that may
     * contain rows with {@code id == 1}.
     *
     * @param table the Iceberg table to scan
     * @see <a href="https://iceberg.apache.org/docs/1.4.2/api/#file-level">Iceberg file-level scan API</a>
     */
    private void scanningOnFileLevel(Table table) {
        TableScan scan = table.newScan();
        TableScan filteredScan = scan.filter(Expressions.equal("id", 1));
        Iterable<CombinedScanTask> tasks = filteredScan.planTasks();
        for (CombinedScanTask combinedScanTask : tasks) {
            for (FileScanTask fileScanTask : combinedScanTask.files()) {
                DataFile dataFile = fileScanTask.file();
                System.out.println(dataFile);
                System.out.println(dataFile.content().id());
                System.out.println(dataFile.content().name());
            }
        }
    }

    /**
     * Reads all rows with {@code id > 1} and prints each field pipe-separated,
     * one row per line.
     *
     * @param table the Iceberg table to read
     * @throws IOException if closing the scan fails
     */
    private void scanningOnRowLevel(Table table) throws IOException {
        // FIX: the CloseableIterable was never closed, leaking the open
        // data-file streams; try-with-resources guarantees cleanup.
        try (CloseableIterable<Record> result = IcebergGenerics.read(table)
                .where(Expressions.greaterThan("id", 1))
                .build()) {
            for (Record r : result) {
                int size = r.size();
                for (int i = 0; i < size; i++) {
                    System.out.print(r.get(i) + "|");
                }
                System.out.println();
            }
        }
    }

    /**
     * Adds an optional {@code address} string column to the table schema.
     *
     * @param table the Iceberg table to evolve
     * @see <a href="https://iceberg.apache.org/docs/1.4.2/api/#update-operations">Iceberg update operations</a>
     */
    private void updateOperations(Table table) {
        table.updateSchema()
                .addColumn("address", Types.StringType.get())
                .commit();
    }

    /**
     * Writes one record into a new Parquet data file and appends that file to
     * the table in a single commit.
     *
     * @param table the Iceberg table to append to
     * @throws IOException if writing or closing the data file fails
     */
    public void writeAppendData(Table table) throws IOException {
        Schema schema = table.schema();
        // 1. Build the record to insert. Note: every required column of the
        // schema must be supplied, otherwise the write fails.
        GenericRecord template = GenericRecord.create(schema);
        // BUG FIX: copy() returns a NEW record populated with the given values;
        // the original code discarded that return value and wrote an empty
        // (all-null) record instead.
        GenericRecord record = template.copy(
                ImmutableMap.of("id", 3, "name", "lisi", "age", 21, "loc", "qw", "address", "高新区"));

        // Random, collision-free file name inside the table location; the
        // .parquet suffix makes the file format evident from the path.
        String filepath = table.location() + "/" + UUID.randomUUID() + ".parquet";
        System.out.println(filepath);
        // 2. Write the record through a Parquet DataWriter.
        FileIO fileIO = table.io();
        DataWriter<GenericRecord> dataWriter = Parquet.writeData(fileIO.newOutputFile(filepath))
                .schema(schema)
                .createWriterFunc(GenericParquetWriter::buildWriter)
                .overwrite()
                .withSpec(PartitionSpec.unpartitioned())
                .build();
        try {
            dataWriter.write(record);
        } finally {
            // toDataFile() is only valid after the writer has been closed.
            dataWriter.close();
        }
        // 3. Append the completed data file to the table and commit.
        DataFile dataFile = dataWriter.toDataFile();
        table.newAppend()
                .appendFile(dataFile)
                .commit();
    }

}
