package com.study.iceberg.minio;

import com.google.common.collect.ImmutableMap;
import com.study.iceberg.utils.DateUtils;
import org.apache.iceberg.*;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.IcebergGenerics;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.parquet.GenericParquetWriter;
import org.apache.iceberg.hadoop.HadoopCatalog;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.DataWriter;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.types.Types;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

/**
 * @author mayjean
 */
public class ReadAndWriteData extends Base {

    public static void main(String[] args) throws Exception {
        new ReadAndWriteData().process();
    }


    private void process() throws IOException {
        final String warehousePath = "s3a://test/";//minio bucket 路径
        Catalog catalog = new HadoopCatalog(getConfiguration(), warehousePath);

        //配置iceberg 库名和表名
        TableIdentifier name = TableIdentifier.parse("iceberg_db.user_info");

        Table table;
        // 通过catalog判断表是否存在，不存在就创建，存在就加载
        if (catalog.tableExists(name)) {
            table = catalog.loadTable(name);
        } else {
            //创建Iceberg表Schema
            Schema schema = new Schema(
                    Types.NestedField.required(1, "id", Types.IntegerType.get()),
                    Types.NestedField.required(2, "name", Types.StringType.get()),
                    Types.NestedField.required(3, "age", Types.IntegerType.get()),
                    Types.NestedField.required(4, "loc", Types.StringType.get()),
                    Types.NestedField.required(5, "day", Types.StringType.get())
            );
            //如果有分区指定对应分区，这里“loc”列为分区列，可以指定unpartitioned 方法不设置表分区
            //PartitionSpec spec = PartitionSpec.unpartitioned();
            PartitionSpec spec = PartitionSpec.builderFor(schema).identity("day").build();

            //指定Iceberg表数据格式化为Parquet存储
            Map<String, String> props = ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, FileFormat.PARQUET.name());
            table = catalog.createTable(name, schema, spec, props);
        }

        //写入数据
//        writeFirstData(table);
        // 追加数据
        writeAppendData(table);
        //查询数据
        catalogScan(table);
    }

    public void writeFirstData(Table table) throws IOException {
        Schema schema = table.schema();
        Map<String, Object> map = new HashMap<>();
        map.put("id", 1);
        map.put("name", "zs");
        map.put("age", 18);
        map.put("loc", "http://www.baidu.com");
        map.put("day", "2024030817");
        //1,构建表，构建插入数据
        GenericRecord record = GenericRecord.create(schema).copy(map);

//        ImmutableList.Builder<GenericRecord> builder = ImmutableList.builder();
        // 注意: required数据列要在schema必须要有,不然会报错
//        builder.add(record.copy(ImmutableMap.of("id", 1, "name", "liyang", "age", 18, "loc", "qw")));
//        builder.add(record.copy(ImmutableMap.of("id", 2, "name", "chengx", "age", 20, "loc", "四川")));
//        ImmutableList<GenericRecord> records = builder.build();
        // 2. 将记录写入parquet文件
        System.out.println("table.location(): " + table.location());
        String filepath = table.location() + "/" + DateUtils.format("yyyyMMddHHmmss");//UUID.randomUUID().toString();
        FileIO fileIO = table.io();
        OutputFile file = fileIO.newOutputFile(filepath);
        DataWriter<GenericRecord> dataWriter =
                Parquet.writeData(file)
                        .schema(schema)
                        .createWriterFunc(GenericParquetWriter::buildWriter)
                        .overwrite()
                        .withSpec(PartitionSpec.unpartitioned())
                        .build();
        try {
            dataWriter.write(record);
        } finally {
            dataWriter.close();
        }

        // 3. 将文件写入table中
        DataFile dataFile = dataWriter.toDataFile();
        table.newAppend()
                .appendFile(dataFile)
                .commit();
    }

    public void writeAppendData(Table table) throws IOException {
        Schema schema = table.schema();
        //1,构建表，构建插入数据
        GenericRecord record = GenericRecord.create(schema);
        ImmutableList.Builder<GenericRecord> builder = ImmutableList.builder();
        // 注意: required数据列要在schema必须要有,不然会报错
        builder.add(record.copy(ImmutableMap.of("id", 4, "name", "zhang", "age", 21, "loc", "www.zhihu.com","day","2024031110")));
        ImmutableList<GenericRecord> records = builder.build();
        // 2. 将记录写入parquet文件
        System.out.println("table.location(): " + table.location());
        final String filepath = table.location() + "/" + "zhang.txt";//UUID.randomUUID().toString();
        System.out.println("filepath:" + filepath);
        FileIO fileIO = table.io();
        OutputFile file = fileIO.newOutputFile(filepath);
        DataWriter<GenericRecord> dataWriter =
                Parquet.writeData(file)
                        .schema(schema)
                        .createWriterFunc(GenericParquetWriter::buildWriter)
                        .overwrite()
                        .withSpec(PartitionSpec.unpartitioned())
                        .build();
        try {
            dataWriter.write(records);
        } finally {
            dataWriter.close();
        }

        // 3. 将文件写入table中
        DataFile dataFile = dataWriter.toDataFile();
        table.newAppend()
                .appendFile(dataFile)
                .commit();
    }

    private void catalogScan(Table table) {
        final int columnsSize = table.schema().columns().size();
        for (Types.NestedField field : table.schema().columns()) {
            System.out.println(field.fieldId() + " " + field.name() + " " + field.type() + " " + field.asRequired().isRequired());
        }

        IcebergGenerics.ScanBuilder scanBuilder = IcebergGenerics.read(table);
        CloseableIterable<Record> records = scanBuilder.build();
        for (Record r : records) {
            for (int i = 0; i < columnsSize; i++) {
                System.out.print(r.get(i) + " ");
            }
            System.out.println();
        }
    }
}
