package com.sugon.ohdfs.integration.flink.job.stream.sink;

import com.sugon.ohdfs.integration.flink.domain.TestItem;
import com.sugon.ohdfs.integration.flink.util.DatetimeUtil;
import org.apache.flink.core.fs.Path;
import org.apache.flink.orc.vector.Vectorizer;
import org.apache.flink.orc.writer.OrcBulkWriterFactory;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.springframework.beans.factory.annotation.Value;

import java.io.IOException;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class BulkOrcStreamSinkJob extends AbstractStreamSinkJob {

    /** Target HDFS directory for the bulk ORC output; overridable via {@code hdfs.base-path}. */
    @Value("${hdfs.base-path:hdfs://10.11.8.29:9001/flink/sinkTest/stream/sink/bulk/orc}")
    String basePath;

    /**
     * Pass-through transform: the incoming {@link TestItem} is forwarded unchanged
     * and serialized into ORC rows by {@link TestItemVectorizer}.
     */
    @Override
    protected Serializable transform(TestItem item) {
        return item;
    }

    /**
     * Builds a {@link StreamingFileSink} that bulk-writes {@link TestItem} records
     * as ORC files under {@link #basePath}.
     */
    @Override
    protected StreamingFileSink buildSink() {
        OrcBulkWriterFactory<TestItem> orcBulkWriterFactory =
                new OrcBulkWriterFactory<>(new TestItemVectorizer(TestItemVectorizer.schema));
        return StreamingFileSink
                .<TestItem>forBulkFormat(new Path(basePath), orcBulkWriterFactory)
                .build();
    }

    /**
     * Serializer for ORC file output: converts one {@link TestItem} into one row of a
     * {@link VectorizedRowBatch}.
     *
     * <p>Declared {@code static}: a non-static inner class would carry a hidden reference to the
     * enclosing {@code BulkOrcStreamSinkJob}, and Flink serializes this vectorizer when
     * distributing the sink to task managers — serialization would then fail (or drag along
     * Spring-injected state) because the enclosing job is not serializable.
     */
    public static class TestItemVectorizer extends Vectorizer<TestItem> implements Serializable {

        /** ORC schema: name and value as strings, produce/process timestamps as epoch millis. */
        public static final String schema = "struct<_col0:string,_col1:string,_col2:bigint,_col3:bigint>";

        public TestItemVectorizer(String schema) {
            super(schema);
        }

        /**
         * Appends {@code item} as the next row of {@code batch}.
         * Column order must match {@link #schema}: name, value, produceTimestamp, processTimeStamp.
         */
        @Override
        public void vectorize(TestItem item, VectorizedRowBatch batch) throws IOException {
            BytesColumnVector nameColVector = (BytesColumnVector) batch.cols[0];
            BytesColumnVector valueColVector = (BytesColumnVector) batch.cols[1];
            LongColumnVector produceColVector = (LongColumnVector) batch.cols[2];
            LongColumnVector processColVector = (LongColumnVector) batch.cols[3];
            int row = batch.size++;
            nameColVector.setVal(row, item.getName().getBytes(StandardCharsets.UTF_8));
            valueColVector.setVal(row, item.getValue().getBytes(StandardCharsets.UTF_8));
            produceColVector.vector[row] = DatetimeUtil.getEpochMilli(item.getProduceTimestamp());
            processColVector.vector[row] = DatetimeUtil.getEpochMilli(item.getProcessTimeStamp());
            // Experiment: attach custom user metadata to the ORC file. Flink documents calling
            // addUserMetadata from within vectorize(); note it runs once per record here.
            this.addUserMetadata("test", ByteBuffer.wrap("test".getBytes(StandardCharsets.UTF_8)));
        }
    }
}
