package com.bleeth.flow.step.output;

import cn.hutool.core.util.StrUtil;
import com.bleeth.flow.core.common.APlugin;
import com.bleeth.flow.core.common.KV;
import com.bleeth.flow.core.common.PluginAnnotation;
import com.bleeth.flow.core.common.PluginTypeEnum;
import com.bleeth.flow.core.util.SparkUtil;
import com.jd.platform.async.worker.WorkResult;
import com.jd.platform.async.wrapper.WorkerWrapper;
import lombok.Data;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.DataFrameReader;
import org.apache.spark.sql.DataFrameWriter;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;

import java.io.Serializable;
import java.util.List;
import java.util.Map;

/**
 * File write plugin: persists the dataset produced by the upstream plugin to
 * storage in any output format Spark supports (parquet, csv, json, orc, jdbc, ...).
 *
 * @author Bleeth
 * @date 2021-08-06
 */
@Data
@PluginAnnotation(name = "File写插件",
        type = PluginTypeEnum.OUTPUT,
        description = "File写插件,Spark中可以保存的数据格式都是支持的()",
        id = "FileWriterPlugin")
public class FileWriterPlugin extends APlugin implements Serializable {

    /** Output format passed to {@link DataFrameWriter#format(String)}, e.g. "parquet", "csv", "json". */
    private String format;

    /** {@link SaveMode} name ("Append", "Overwrite", ...); defaults to Append when blank, see {@link #init()}. */
    private String mode;

    /** Destination location passed to {@link DataFrameWriter#save(String)}. */
    private String save;

    /** Optional single partition column for {@link DataFrameWriter#partitionBy}; skipped when blank. */
    private String partition;

    /** Optional repartition settings applied to the dataset before writing; may be null. */
    private RepartitionConf repartitionConf;

    /** Extra writer options forwarded one by one via {@link DataFrameWriter#option}; may be null. */
    private List<KV> options;

    /**
     * Propagates the upstream work result to the base plugin; no extra handling here.
     */
    @Override
    public void result(boolean success, Dataset<Row> input, WorkResult<Dataset<Row>> workResult) {
        super.result(success, input, workResult);
    }

    /**
     * Applies defaults before execution: an unset save mode falls back to
     * {@link SaveMode#Append} so repeated runs add data instead of failing.
     */
    @Override
    public void init() {
        if (StrUtil.isEmpty(mode)) {
            mode = SaveMode.Append.name();
        }
    }

    /**
     * Reads the dataset produced by the first upstream plugin, optionally
     * repartitions it, applies the configured writer options and partitioning,
     * then saves it to {@link #save}.
     *
     * @param param       ignored; the input dataset is taken from the upstream wrapper
     * @param allWrappers all workers in the flow, keyed by plugin name
     * @return always {@code null} — output plugins terminate the chain
     * @throws IllegalStateException if no upstream plugin is configured
     */
    @Override
    public Dataset<Row> action(Dataset<Row> param, Map<String, WorkerWrapper> allWrappers) {
        super.action(param, allWrappers);

        // Fail fast with a clear message instead of an opaque IndexOutOfBoundsException.
        if (fromList == null || fromList.isEmpty()) {
            throw new IllegalStateException("FileWriterPlugin requires at least one upstream plugin");
        }
        String fromPluginName = fromList.get(0);
        WorkerWrapper fromWrapper = allWrappers.get(fromPluginName);
        Dataset<Row> ds = (Dataset<Row>) fromWrapper.getWorkResult().getResult();

        if (repartitionConf != null) {
            ds = ds.repartition(repartitionConf.num, new Column(repartitionConf.col));
        }

        DataFrameWriter<Row> writer = ds.write();

        if (options != null) {
            for (KV option : options) {
                writer.option(option.getKey(), option.getValue());
            }
        }

        if (StrUtil.isNotEmpty(partition)) {
            writer.partitionBy(partition);
        }
        writer.format(format).mode(mode).save(save);
        return null;
    }

    /**
     * Repartition settings: redistribute the dataset into {@code num}
     * partitions hashed by column {@code col} before writing.
     */
    @Data
    public static class RepartitionConf {

        /** Column whose hash determines the target partition. */
        private String col;

        /** Number of partitions to produce. */
        private Integer num;
    }

    /**
     * Ad-hoc manual test: reads a MySQL table over JDBC and writes it out as
     * parquet. Not part of the plugin runtime.
     */
    public static void main(String[] args) {
        DataFrameReader reader = SparkUtil.getSparkInstance().read()
                .format("jdbc")
                .option("driver", "com.mysql.jdbc.Driver")
                .option("url", "jdbc:mysql://127.0.0.1:3306/che?useUnicode=true&characterEncoding=utf8&tinyInt1isBit=false")
                .option("user", "root")
                .option("password", "123456");
        reader.option("dbtable", "user");
        Dataset<Row> ds = reader.load();

        // Dataset is immutable: repartition returns a NEW Dataset, so the result
        // must be reassigned (the original code discarded it, making the call a no-op).
        ds = ds.repartition(100, new Column("CardID"));

        DataFrameWriter<Row> writer = ds.write();

        writer.format("parquet").mode("APPEND").save("E:/客户/车站/spark/parquet1/bus0421.parquet");
    }

}
