package org.danan.spark2hudi.app;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.ForeachFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.danan.spark2hudi.bean.MyKafkaProducer;
import org.danan.spark2hudi.utils.DateFormatUtil;
import org.danan.spark2hudi.utils.KafkaUtil;

import java.util.Iterator;
import java.util.List;

/**
 * Created with IntelliJ IDEA.
 *
 * @Author: NanHuang
 * @Date: 2023/07/12/9:21
 * @Description:
 */
/**
 * Spark batch job: reads a Hudi table from HDFS and publishes every row
 * (as JSON) to the Kafka topic {@code hudi_to_kafka}.
 *
 * <p>Runs locally ({@code local[2]}) with Kryo serialization; parquet
 * datetime rebase is set to CORRECTED so legacy Hudi timestamps read cleanly.
 */
public class Hudi2Kafka {
    public static void main(String[] args) throws ClassNotFoundException {
        // Impersonate the HDFS user the Hudi table belongs to.
        System.setProperty("HADOOP_USER_NAME", "danan");

        // 1. Spark configuration.
        SparkConf conf = new SparkConf()
                .setMaster("local[2]")
                .setAppName("Hudi2Database")
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .set("spark.sql.legacy.timeParserPolicy", "LEGACY")
                .set("spark.sql.parquet.datetimeRebaseModeInRead", "CORRECTED")
                .set("spark.kryoserializer.buffer.max", "512m");

        SparkSession spark = SparkSession.builder().config(conf).getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

        System.out.println("开始时间为：" + DateFormatUtil.toYmdHms(System.currentTimeMillis()));

        // 2. Read the Hudi table from HDFS.
        Dataset<Row> ds = spark.read().format("hudi")
                .load("hdfs://hadoop102:8020/spark2hudi/tdy_list_onurse");
        JavaRDD<Row> javaRDD = ds.javaRDD();

        // 3. Write to Kafka, one producer per partition (measured ~9M rows in 2 min).
        //    try-with-resources guarantees close(), which flushes buffered records —
        //    without it the in-flight batch can be silently dropped when the
        //    executor JVM exits, and the producer's network threads leak.
        javaRDD.foreachPartition(rowIterator -> {
            try (KafkaProducer<String, String> producer = KafkaUtil.getProducer()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    // Topic and payload format: whole row serialized as JSON.
                    producer.send(new ProducerRecord<>("hudi_to_kafka", row.json()));
                }
            }
        });

        spark.close();
        jsc.close();

        System.out.println("结束时间为：" + DateFormatUtil.toYmdHms(System.currentTimeMillis()));
    }
}
