package com.edata.bigdata.spark;


import static org.apache.spark.sql.functions.struct;
import static org.apache.spark.sql.functions.to_json;

import java.util.Objects;

import org.apache.spark.sql.DataFrameWriter;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Prepares Spark {@link Dataset}s for publishing to a Kafka topic.
 *
 * <p>Each row of the dataset is serialized to a single JSON document and placed
 * in the {@code value} column, which is the column the Spark Kafka sink reads
 * as the message payload. Connection details (bootstrap servers and topic) come
 * from the injected {@link KafkaConnector}, which must be assigned before
 * {@link #createDataFrameWriter(Dataset)} is called.
 *
 * <p>NOTE(review): this class is not thread-safe — {@code kafkaConnector} is a
 * mutable public field; confirm callers set it exactly once before use.
 */
public class KafkaWriter {

    public Logger logger = LoggerFactory.getLogger(KafkaWriter.class);

    /** Connection settings (bootstrap servers, topic); must be set before use. */
    public KafkaConnector kafkaConnector;

    /**
     * Builds a Kafka-format {@link DataFrameWriter} for the given dataset.
     *
     * <p>The entire row is wrapped in a struct and serialized with
     * {@code to_json}, aliased to {@code value} as required by the Kafka sink.
     * The returned writer is fully configured; the caller is expected to invoke
     * {@code save()} (or equivalent) to perform the actual write.
     *
     * @param <T>  the element type of the dataset
     * @param data the dataset whose rows will be serialized to JSON messages
     * @return a writer targeting {@code kafkaConnector.TOPIC} on
     *         {@code kafkaConnector.BOOTSTRAP}
     * @throws NullPointerException if {@code data} is null or
     *         {@code kafkaConnector} has not been assigned
     */
    public <T> DataFrameWriter<Row> createDataFrameWriter(Dataset<T> data) {
        Objects.requireNonNull(data, "data");
        Objects.requireNonNull(kafkaConnector,
                "kafkaConnector must be set before creating a Kafka writer");
        logger.info("正在创建Kafka：{}，{} 消息生产者", kafkaConnector.BOOTSTRAP, kafkaConnector.TOPIC);
        // Serialize the whole row to JSON under the "value" column — the
        // payload column the Spark Kafka sink expects.
        Dataset<Row> kafkaReady = data.select(to_json(struct("*")).alias("value"));
        return kafkaReady.write().format("kafka")
                .option("kafka.bootstrap.servers", kafkaConnector.BOOTSTRAP)
                .option("topic", kafkaConnector.TOPIC);
    }

    /** No-arg constructor; {@link #kafkaConnector} must be assigned separately. */
    public KafkaWriter() {
    }
}
