package com.bleeth.flow.step.input;


import com.bleeth.flow.core.common.APlugin;
import com.bleeth.flow.core.common.PluginAnnotation;
import com.bleeth.flow.core.common.PluginTypeEnum;
import com.bleeth.flow.core.util.SparkUtil;
import com.jd.platform.async.wrapper.WorkerWrapper;
import lombok.Data;
import org.apache.spark.sql.DataFrameReader;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;

import java.io.Serializable;
import java.util.Map;

/**
 * CSV reader plugin: loads a CSV file (or directory of CSV files) from
 * {@code path} into a {@link Dataset} via the shared Spark session. The first
 * line of the input is treated as a header row.
 *
 * @author ：Bleeth
 * @date ：2021-08-05
 */
// NOTE(review): the annotation metadata (name "自定义聚合" = "custom aggregation",
// type AGG, empty description) looks copy-pasted from an aggregation plugin.
// For a CSV reader living in the *.step.input package one would expect an
// INPUT-style type and a matching display name — confirm against the plugin
// registry / PluginTypeEnum before changing, since flows may reference these values.
@Data
@PluginAnnotation(name = "自定义聚合",
        type = PluginTypeEnum.AGG,
        description = "",
        id = "CsvReaderPlugin")
public class CsvReaderPlugin extends APlugin implements Serializable {

    private static final long serialVersionUID = -1895336557663637505L;

    /** Fallback character encoding when {@link #encode} is not configured. */
    private static final String DEFAULT_ENCODE = "UTF-8";
    /** Fallback field separator when {@link #delimiter} is not configured. */
    private static final char DEFAULT_SPLIT = ',';
    /** Quote character enclosing fields (same as Spark's CSV default). */
    private static final char DEFAULT_DELIMITER = '"';

    // Path (file or directory) of the CSV data to load.
    private String path;

    // Character encoding of the source file; blank falls back to DEFAULT_ENCODE.
    private String encode;

    // Field separator; blank falls back to DEFAULT_SPLIT.
    private String delimiter;

    // Optional DDL-style schema, e.g. "a INT, b STRING"; when blank the
    // columns come from the header row with Spark's default (string) types.
    private String schema;

    /**
     * Reads the configured CSV input into a {@link Dataset}.
     *
     * @param param       upstream dataset (unused — this is a source plugin)
     * @param allWrappers all worker wrappers of the flow (unused here)
     * @return the loaded CSV rows, first line treated as the header
     */
    @Override
    public Dataset<Row> action(Dataset<Row> param, Map<String, WorkerWrapper> allWrappers) {
        // Fall back to the declared defaults so the previously-unused
        // encode/DEFAULT_* configuration actually takes effect, and so a
        // missing delimiter no longer reaches .option(...) as null.
        String charset = isBlank(encode) ? DEFAULT_ENCODE : encode;
        String sep = isBlank(delimiter) ? String.valueOf(DEFAULT_SPLIT) : delimiter;

        DataFrameReader reader = SparkUtil.getSparkInstance().read()
                .format("csv")
                .option("header", true)
                .option("encoding", charset)
                .option("delimiter", sep)
                .option("quote", String.valueOf(DEFAULT_DELIMITER));
        if (!isBlank(schema)) {
            // An explicit DDL schema skips Spark's schema inference pass.
            reader = reader.schema(schema);
        }
        return reader.load(path);
    }

    /** @return {@code true} when {@code s} is null or only whitespace. */
    private static boolean isBlank(String s) {
        return s == null || s.trim().isEmpty();
    }
}
