package com.study.iceberg.minio;

import com.google.common.collect.ImmutableMap;

import org.apache.iceberg.*;
import org.apache.iceberg.actions.RewriteDataFilesActionResult;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.IcebergGenerics;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.parquet.GenericParquetWriter;
import org.apache.iceberg.hadoop.HadoopCatalog;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.DataWriter;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

/**
 * Example that stores a local file's raw bytes as a row of an Iceberg table kept in a
 * MinIO (S3A) warehouse, reads it back, and reconstructs the file on local disk.
 *
 * <p>The table {@code iceberg_db.txt_file} has four required columns:
 * {@code id}, {@code name}, {@code type} and {@code file} (the binary payload).
 *
 * @author mayjean
 */
public class WriteFileData extends Base {

    public static void main(String[] args) throws Exception {
        new WriteFileData().process();
    }


    /**
     * Creates or loads the {@code iceberg_db.txt_file} table in the MinIO-backed
     * Hadoop catalog, then runs the selected demo step (currently the compaction stub).
     *
     * @throws IOException if catalog or table access fails
     */
    private void process() throws IOException {
        // MinIO bucket path used as the catalog warehouse root.
        final String warehousePath = "s3a://test/";
        Catalog catalog = new HadoopCatalog(getConfiguration(), warehousePath);

        // Iceberg database and table name.
        TableIdentifier name = TableIdentifier.parse("iceberg_db.txt_file");

        Table table;
        // Load the table if it already exists; otherwise create it.
        if (catalog.tableExists(name)) {
            table = catalog.loadTable(name);
        } else {
            // Table schema: an id plus the file's name, type and raw bytes.
            Schema schema = new Schema(
                    Types.NestedField.required(1, "id", Types.IntegerType.get()),
                    Types.NestedField.required(2, "name", Types.StringType.get()),
                    Types.NestedField.required(3, "type", Types.StringType.get()),
                    Types.NestedField.required(4, "file", Types.BinaryType.get())
            );
            // Unpartitioned here; to partition by a "loc" column use the builder below.
            PartitionSpec spec = PartitionSpec.unpartitioned();
            //PartitionSpec spec = PartitionSpec.builderFor(schema).identity("loc").build();

            // Store the table's data files in Parquet format.
            Map<String, String> props = ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, FileFormat.PARQUET.name());
            table = catalog.createTable(name, schema, spec, props);
        }

        // Write data
//        writeAppendData(table);
        // Scan data
//        catalogScan(table);

        mergeFileOnFlink(table);
    }

    /**
     * Writes one record (id, file name, file type, file bytes) into a new Parquet
     * data file under the table location and commits it as an append.
     *
     * @param table the Iceberg table to append to
     * @throws IOException if reading the local file or writing the data file fails
     */
    public void writeAppendData(Table table) throws IOException {
        Schema schema = table.schema();
        // 1. Build the record to insert.
        // NOTE: every required column of the schema must be populated, or the write fails.
        // GenericRecord.copy(Map) returns a NEW record carrying the given values; the
        // result must be kept (the previous code discarded it and wrote an empty record).
        GenericRecord record = GenericRecord.create(schema)
                .copy(ImmutableMap.of("id", 1, "name", "hello.txt", "type", "txt", "file", castFile("d://hello.txt")));

        // 2. Write the record to a Parquet data file under the table location.
        System.out.println("table.location(): " + table.location());
        String filepath = table.location() + "/" + UUID.randomUUID();
        FileIO fileIO = table.io();

        OutputFile file = fileIO.newOutputFile(filepath);
        DataWriter<GenericRecord> dataWriter =
                Parquet.writeData(file)
                        .schema(schema)
                        .createWriterFunc(GenericParquetWriter::buildWriter)
                        .overwrite()
                        .withSpec(PartitionSpec.unpartitioned())
                        .build();
        try {
            dataWriter.write(record);
        } finally {
            dataWriter.close();
        }

        // 3. Commit the finished data file to the table.
        DataFile dataFile = dataWriter.toDataFile();
        table.newAppend()
                .appendFile(dataFile)
                .commit();
    }

    /**
     * Reads a local file fully into memory as a {@link ByteBuffer}.
     *
     * <p>The previous implementation leaked the {@code FileInputStream}/{@code FileChannel}
     * and returned a {@code MappedByteBuffer} whose validity depended on the unclosed
     * channel; reading the bytes eagerly avoids both problems.
     *
     * @param localPath path of the local file to read
     * @return a buffer wrapping the file's contents
     * @throws IOException if the file cannot be read
     */
    public ByteBuffer castFile(String localPath) throws IOException {
        return ByteBuffer.wrap(Files.readAllBytes(Paths.get(localPath)));
    }

    /**
     * Scans all rows of the table, prints them, and writes each row's binary
     * {@code file} column back out as a local file under {@code d://tmp//}.
     *
     * @param table the Iceberg table to scan
     * @throws IOException if closing the scan fails
     */
    private void catalogScan(Table table) throws IOException {
        Map<String, Type> columns = new HashMap<>();
        for (Types.NestedField field : table.schema().columns()) {
            // Ask the field directly; asRequired().isRequired() always printed true.
            System.out.println(field.fieldId() + " " + field.name() + " " + field.type() + " " + field.isRequired());
            columns.put(field.name(), field.type());
        }

        IcebergGenerics.ScanBuilder scanBuilder = IcebergGenerics.read(table);
        // CloseableIterable holds open file readers; close it when the scan is done.
        try (CloseableIterable<Record> records = scanBuilder.build()) {
            for (Record r : records) {
                System.out.println(r.get(0) + "|" + r.get(1) + "|" + r.get(2) + "|" + r.get(3));
                // Column 3 is the binary payload, column 1 the original file name.
                castFile((ByteBuffer) r.get(3), "d://tmp//", String.valueOf(r.get(1)));
            }
        }
    }

    /**
     * Writes the buffer's remaining bytes to {@code dir + fileName}.
     *
     * @param buffer   bytes to write (consumed from position to limit)
     * @param dir      target directory (must end with a separator)
     * @param fileName target file name
     * @return {@code true} on success, {@code false} if writing failed
     */
    private boolean castFile(ByteBuffer buffer, String dir, String fileName) {
        try (FileOutputStream fos = new FileOutputStream(new File(dir + fileName));
             FileChannel channel = fos.getChannel()) {
            // A single write() call may not drain the buffer; loop until it is empty.
            while (buffer.hasRemaining()) {
                channel.write(buffer);
            }
            return true;
        } catch (IOException ex) {
            ex.printStackTrace();
            return false;
        }
    }

    // Compacts small data files into larger ones. NOTE: rewriteDataFiles only writes the
    // merged files — the old small files are not deleted, so the file count actually grows
    // until old snapshots are expired and orphan files are removed.
    private void mergeFileOnFlink(Table table) {
        // Merge small files
//        RewriteDataFilesActionResult result = org.apache.iceberg.flink.actions.Actions.forTable(table)
//                .rewriteDataFiles()
//                // merge files smaller than 1024 bytes
//                .targetSizeInBytes(1024L)
//                .execute();
    }
}
