package com.apex.spark.sink;

import com.apex.spark.SparkEnvironment;
import com.apex.spark.batch.SparkBatchSink;
import com.apex.spark.utils.ConfigKeyName;
import com.typesafe.config.Config;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Batch sink that writes a {@link Dataset} of rows to the filesystem.
 *
 * <p>Supported file types: {@code text}, {@code csv}, {@code json},
 * {@code orc}, {@code parquet}. Output path, file type, and parallelism
 * are read from the plugin {@link Config} in {@link #prepare}.
 */
public class FileBatchSink implements SparkBatchSink {

    private static final Logger logger = LoggerFactory.getLogger(FileBatchSink.class);

    private Config config;

    /** Target output path (required, see {@link #prepare}). */
    private String path;

    /** One of: text, csv, json, orc, parquet. */
    private String fileType;

    /** Number of output partitions, stored as the raw config string. */
    private String parallelism;

    /** Column separator for the "text" format; defaults to ",". */
    private String separator;

    /**
     * Writes {@code dataSet} to {@link #path} in the configured format.
     *
     * @throws IllegalArgumentException if {@link #fileType} is not supported
     * @throws NumberFormatException    if {@link #parallelism} is not an int
     */
    @Override
    public void outputBatch(SparkEnvironment env, Dataset<Row> dataSet) {
        // Parse once instead of re-boxing in every branch.
        final int numPartitions = Integer.parseInt(parallelism);
        // Copy the separator into a local so the closure below captures only
        // the local, not the enclosing sink instance. Spark serializes
        // closures to ship them to executors; capturing `this` risks a
        // "Task not serializable" failure.
        final String sep = separator;
        switch (fileType) {
            case "text":
                dataSet.toJavaRDD()
                        .map(row -> row.mkString(sep))
                        .repartition(numPartitions)
                        .saveAsTextFile(path);
                break;
            // These four formats share identical writer settings and differ
            // only in the format name, so route them through format(...).
            case "csv":
            case "json":
            case "orc":
            case "parquet":
                dataSet
                        .repartition(numPartitions)
                        .write()
                        .mode(SaveMode.Overwrite)
                        .option("timestampFormat", "yyyy/MM/dd HH:mm:ss ZZ")
                        .format(fileType)
                        .save(path);
                break;
            default:
                logger.error("no support {}", fileType);
                // Fail the job loudly instead of System.exit(0), which killed
                // the whole JVM and reported *success* to the caller.
                throw new IllegalArgumentException("Unsupported file type: " + fileType);
        }
    }

    /**
     * Reads sink settings from the config.
     *
     * <p>NOTE(review): if {@code SPARK_WRITER_PATH} is absent, {@code path},
     * {@code fileType} and {@code parallelism} stay {@code null} and
     * {@link #outputBatch} will fail later — behavior preserved from the
     * original; verify upstream validation guarantees the key is present.
     */
    @Override
    public void prepare(SparkEnvironment plugin) {
        if (config.hasPath(ConfigKeyName.SPARK_WRITER_PATH)) {
            path = config.getString(ConfigKeyName.SPARK_WRITER_PATH);
            fileType = config.getString(ConfigKeyName.SPARK_WRITER_FILE_TYPE);
            parallelism = config.getString(ConfigKeyName.SPARK_WRITER_FILE_PARALLELISM);
        }

        if (config.hasPath(ConfigKeyName.SPARK_WRITER_FILE_SEPARATOR)) {
            separator = config.getString(ConfigKeyName.SPARK_WRITER_FILE_SEPARATOR);
        } else {
            separator = ",";
        }
    }

    @Override
    public Config getConfig() {
        return config;
    }

    @Override
    public void setConfig(Config config) {
        this.config = config;
    }
}
