package architect;

import org.apache.flink.api.common.io.RichOutputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.types.Row;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.example.ExampleParquetWriter;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

import static org.apache.parquet.schema.OriginalType.UTF8;

/**
 * Flink {@link RichOutputFormat} that writes {@link Row} records to a Parquet file
 * using the parquet-mr example (Group) API.
 *
 * @title: ParquetOutputFormat
 * @author: guan.xin
 * @create: 2022-04-12 21:29
 * @since: JDK8
 */
public class ParquetOutputFormat extends RichOutputFormat<Row> {

    /** Destination path of the Parquet file (overwritten on open). */
    private final String filePath;

    /** Name given to the Parquet message type (schema root). */
    private final String tableName;

    /**
     * Column name -> Parquet primitive type. The map's iteration order defines the
     * column order of the schema, so callers should pass an order-preserving map
     * (e.g. LinkedHashMap) — TODO confirm with callers.
     */
    private final Map<String, PrimitiveType.PrimitiveTypeName> schemaMap;

    /** Created in {@link #open(int, int)}; null until then. */
    private ParquetWriter<Group> writer;

    /** Group factory bound to the schema built in {@link #open(int, int)}. */
    private SimpleGroupFactory factory;

    /**
     * @param filePath  destination file path
     * @param tableName schema root name
     * @param schemaMap column name -> primitive type, in desired column order
     */
    public ParquetOutputFormat(String filePath, String tableName, Map<String, PrimitiveType.PrimitiveTypeName> schemaMap) {
        this.filePath = filePath;
        this.tableName = tableName;
        this.schemaMap = schemaMap;
    }

    @Override
    public void configure(Configuration parameters) {
        // No runtime configuration is read; everything arrives via the constructor.
    }

    /**
     * Builds the Parquet schema from {@link #schemaMap} and opens the writer.
     *
     * @throws IOException if the Parquet writer cannot be created; propagated so a
     *         failed open is not silently ignored (the previous version swallowed
     *         the exception, leaving {@code writer} null and causing an NPE later
     *         in {@code writeRecord}).
     */
    @Override
    public void open(int taskNumber, int numTasks) throws IOException {
        Types.MessageTypeBuilder messageTypeBuilder = Types.buildMessage();
        // NOTE(review): every column is annotated as UTF8. That annotation is only
        // valid on BINARY primitives — confirm schemaMap never contains numeric
        // types, otherwise schema construction will fail.
        schemaMap.forEach((columnName, primitiveType) ->
                messageTypeBuilder.required(primitiveType).as(UTF8).named(columnName));
        MessageType messageType = messageTypeBuilder.named(tableName);

        factory = new SimpleGroupFactory(messageType);
        writer = ExampleParquetWriter.builder(new Path(filePath))
                .withWriteMode(ParquetFileWriter.Mode.OVERWRITE)
                .withType(messageType)
                .build();
    }

    /**
     * Writes one row as a Parquet group.
     *
     * <p>Removed leftover debug/fault-injection code (stdout counters and an
     * artificial {@code IOException} thrown on the third record) that would break
     * any real job.</p>
     *
     * @throws IOException if the underlying writer fails
     */
    @Override
    public void writeRecord(Row row) throws IOException {
        writer.write(fullValue(row, factory));
    }

    /**
     * Converts a {@link Row} into a {@link Group} positionally.
     *
     * <p>Only fields 0 and 1 are mapped; the remaining columns are still commented
     * out pending timestamp handling. Assumes field 0 is non-null (its
     * {@code toString()} is called) and field 1 is a String — TODO confirm against
     * the upstream row schema.</p>
     */
    static Group fullValue(Row row, SimpleGroupFactory factory) {
        Group group = factory.newGroup();
        group.add(0, row.getFieldAs(0).toString());
        group.add(1, (String) row.getField(1));
        // ParquetTimestampUtils.getTimestampMillis(Binary.fromString())
//        group.add(2, row.getFieldAs(2).toString());
//        group.add(3, row.getFieldAs(3).toString());
//        group.add(4, row.getFieldAs(4).toString());

        return group;
    }

    /**
     * Closes the writer, flushing the Parquet footer. Null-guarded so that a
     * failure during {@link #open(int, int)} does not turn into a secondary NPE
     * when Flink invokes close during cleanup.
     */
    @Override
    public void close() throws IOException {
        if (writer != null) {
            writer.close();
        }
    }
}
