package com.six.compress.old;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.storage.ql.exec.vector.LongColumnVector;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;

/**
 * <p/>
 * <li>Description: </li>
 * <li>@author: mengjie &lt;jie.meng@cdcalabar.com&gt; </li>
 * <li>Date: 2018/11/26 </li>
 */
@SuppressWarnings("all")
public class OrcLoaderTest {

    /** Number of rows to write and, equally, the row-batch capacity. */
    private static final int ROW_COUNT = 10_000;

    /**
     * Writes {@value #ROW_COUNT} rows of the current timestamp into a
     * ZLIB-compressed ORC file and prints the elapsed time.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        long start = System.currentTimeMillis();

        // Define the ORC schema (i.e. the table structure): one long column "time".
        TypeDescription schema = TypeDescription.createStruct();
        schema.addField("time", TypeDescription.createLong());

        VectorizedRowBatch batch = schema.createRowBatch(ROW_COUNT);
        String filePath = "/home/hdfs/fly_param/zlib211.orc";
        Configuration conf = new Configuration();
        Writer writer = null;
        try {
            writer = OrcFile.createWriter(
                    new Path(filePath),
                    OrcFile.writerOptions(conf)
                            .setSchema(schema)
                            .compress(CompressionKind.ZLIB)
                            .version(OrcFile.Version.V_0_12));

            LongColumnVector timeCol = (LongColumnVector) batch.cols[0];
            for (int i = 0; i < ROW_COUNT; i++) {
                // FIX: batch.size must be advanced per row — ORC only persists
                // batch.size rows; the original left it at 0 and wrote nothing.
                int row = batch.size++;
                timeCol.vector[row] = System.currentTimeMillis();
                if (batch.size == batch.getMaxSize()) {
                    writer.addRowBatch(batch);
                    batch.reset();
                }
            }
            // Flush any partial final batch (guarded so we never write an empty one).
            if (batch.size > 0) {
                writer.addRowBatch(batch);
                batch.reset();
            }
            System.out.println("Wrote " + ROW_COUNT + " rows to " + filePath
                    + " in " + (System.currentTimeMillis() - start) + " ms");
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // FIX: close in finally so the file handle is released (and data
            // flushed) even when an exception is thrown mid-write.
            if (writer != null) {
                try {
                    writer.close();
                } catch (Exception closeError) {
                    closeError.printStackTrace();
                }
            }
        }
    }

}
