package com;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

/**
 * Small Spark batch job: reads a CSV file (first row as header) into a
 * {@code Dataset<Row>}, prints it, and rewrites it as a zlib-compressed ORC
 * file using Spark's native ORC writer.
 *
 * <p>Runs with a local master ({@code local[4]}) and Hive support enabled.
 */
public class SparkStreamingORCRW {

    /** Default input CSV path used when no CLI argument is supplied. */
    private static final String DEFAULT_INPUT_CSV =
            "D:\\project2018\\dec-kks-etl\\spark-611\\data\\test.csv";

    /** Default ORC output path used when no second CLI argument is supplied. */
    private static final String DEFAULT_OUTPUT_ORC =
            "D:\\project2018\\dec-kks-etl\\spark-611\\data\\tmp.zlib.orc";

    /**
     * Entry point.
     *
     * @param args optional paths: {@code args[0]} = input CSV, {@code args[1]} =
     *             output ORC directory; the original hard-coded development
     *             paths are used as defaults when arguments are absent.
     */
    public static void main(String[] args) {
        // Generalized: paths may now come from the command line; behavior is
        // unchanged when the job is launched with no arguments.
        String inputCsv = args.length > 0 ? args[0] : DEFAULT_INPUT_CSV;
        String outputOrc = args.length > 1 ? args[1] : DEFAULT_OUTPUT_ORC;

        SparkConf conf = new SparkConf();
        SparkSession spark = SparkSession
                .builder()
                .appName("Java Spark SQL basic example")
                .config("spark.io.compression.codec", "ZSTD")
                // BUG FIX: key was "debug.maxToStringFields", which Spark never
                // reads; the actual setting is "spark.debug.maxToStringFields".
                .config("spark.debug.maxToStringFields", "30000")
                .config("spark.sql.orc.impl", "native")
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .config("spark.files.overwrite", "true")
                .config(conf)
                .master("local[4]")
                .enableHiveSupport()
                .getOrCreate();

        try {
            Dataset<Row> data = spark.read()
                    .option("header", true)
                    .csv(inputCsv);

            // show(false): print rows without truncating long column values.
            data.show(false);

            data.write()
                    .mode(SaveMode.Overwrite)
                    .format("orc")
                    .option("orc.compress", "zlib")
                    .save(outputOrc);
        } finally {
            // BUG FIX: the session was never stopped; release the local Spark
            // context even when the read or write throws.
            spark.stop();
        }
    }
}
