package com.apex.spark.source;

import com.apex.spark.SparkEnvironment;
import com.apex.spark.batch.SparkBatchSource;
import com.apex.spark.utils.ConfigKeyName;
import com.typesafe.config.Config;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;

import java.util.LinkedList;
import java.util.List;

/**
 * Batch source that reads files from a configured path into a {@link Dataset}.
 *
 * <p>The read strategy is selected by the configured file type:
 * <ul>
 *   <li>{@code log}/{@code raw} — each line becomes a single-column ("message") string row</li>
 *   <li>{@code text}        — each line is split on the configured delimiter and mapped to the
 *                             schema supplied by {@link SparkEnvironment#getStructType()}</li>
 *   <li>{@code orc}/{@code parquet}/{@code json}/{@code csv} — delegated to the matching
 *                             {@link org.apache.spark.sql.DataFrameReader} method</li>
 *   <li>anything else       — {@code spark.read().load(path)} (default data source)</li>
 * </ul>
 */
public class FileBatchSource implements SparkBatchSource {
    private Config config;
    private String filePath;
    private String split;
    private String type;

    /**
     * Reads the configured file into a {@code Dataset<Row>} according to {@link #type}.
     *
     * @param env Spark environment providing the session and (for "text") the target schema
     * @return the loaded dataset; never {@code null} for a recognized type
     */
    @Override
    public Dataset<Row> getData(SparkEnvironment env) {
        SparkSession sparkSession = env.getSparkSession();
        JavaSparkContext jsc = new JavaSparkContext(sparkSession.sparkContext());
        // Capture the delimiter in a local so the closures below serialize only the
        // String, not this whole source object (whose Config field is not serializable).
        final String delimiter = split;
        Dataset<Row> dataset;
        switch (type) {
            case "log":
            case "raw":
                // One string column named "message" holding the raw line.
                List<StructField> informations = new LinkedList<>();
                informations.add(DataTypes.createStructField("message", DataTypes.StringType, true));
                JavaRDD<Row> javaRDDLog = jsc.textFile(filePath)
                        .map((Function<String, Row>) value -> RowFactory.create(value));
                dataset = sparkSession.createDataFrame(javaRDDLog, DataTypes.createStructType(informations));
                break;
            case "text":
                // NOTE(review): String.split treats the delimiter as a regex, so
                // configured values like "|" or "." must be escaped by the user.
                JavaRDD<Row> javaRDDText = jsc.textFile(filePath)
                        .map((Function<String, Row>) s -> RowFactory.create((Object[]) s.split(delimiter)));
                dataset = sparkSession.createDataFrame(javaRDDText, env.getStructType());
                break;
            case "orc":
                dataset = sparkSession.read().orc(filePath);
                break;
            case "parquet":
                // Bug fix: the original discarded the read result here, returning null.
                dataset = sparkSession.read().parquet(filePath);
                break;
            case "json":
                dataset = sparkSession.read().json(filePath);
                break;
            case "csv":
                dataset = sparkSession.read().csv(filePath);
                break;
            default:
                // Fall back to Spark's default data source for unknown types.
                dataset = sparkSession.read().load(filePath);
                break;
        }
        return dataset;
    }

    /**
     * Caches the path, delimiter, and type from config before {@link #getData} runs.
     * All three keys are read only when READ_FILE_PATH is present; FILE_SPLIT is
     * required even for non-"text" types (pre-existing behavior, kept for compatibility).
     */
    @Override
    public void prepare(SparkEnvironment plugin) {
        if (config.hasPath(ConfigKeyName.READ_FILE_PATH)) {
            filePath = config.getString(ConfigKeyName.READ_FILE_PATH);
            split = config.getString(ConfigKeyName.FILE_SPLIT);
            type = config.getString(ConfigKeyName.READ_FILE_TYPE);
        }
    }

    @Override
    public Config getConfig() {
        return config;
    }

    @Override
    public void setConfig(Config config) {
        this.config = config;
    }
}
