package org.mcclone.jr.spark;

import com.google.gson.Gson;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import scala.Tuple2;

import java.util.*;

/**
 * Streams JSON messages from a Kafka topic ({@code my_tpc}) via the Spark
 * direct-stream API and appends each record, projected onto a fixed
 * (name, type, id) string schema, to a Parquet file.
 *
 * <p>Broker address, topic, group id, and output path are hard-coded below;
 * intended to be run locally.
 */
public class KafkaStreamRun {

    public static void main(String[] args) throws InterruptedException {
        // "local" gives a single core; the Spark Streaming guide requires at
        // least local[2] so batch processing is not starved of threads.
        SparkConf sparkConf = new SparkConf().setMaster("local[2]").setAppName("JavaDirectKafkaWordCount");
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(2));

        Set<String> topicsSet = new HashSet<>(Arrays.asList("my_tpc".split(",")));
        Map<String, Object> kafkaParams = new HashMap<>();
        // Use the ConsumerConfig constant consistently rather than mixing in
        // the bare "bootstrap.servers" string literal.
        kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, "spark");
        kafkaParams.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaParams.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        // Create direct kafka stream with brokers and topics.
        JavaInputDStream<ConsumerRecord<String, String>> messages = KafkaUtils.createDirectStream(
                jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.Subscribe(topicsSet, kafkaParams));

        // Target schema: each incoming JSON object is projected onto these
        // three string columns; keys missing from the JSON become null.
        StructType userSchema = new StructType()
                .add("name", "string")
                .add("type", "string")
                .add("id", "string");

        // NOTE(review): a mapToPair(...).groupByKey().foreachRDD(no-op)
        // pipeline was removed here — it produced no output yet registered an
        // output operation that forced a full shuffle on every 2 s batch.

        messages.map(ConsumerRecord::value).foreachRDD(javaRDD -> {
            // Skip empty micro-batches so we don't append empty Parquet
            // part-files every batch interval.
            if (javaRDD.isEmpty()) {
                return;
            }

            // mapPartitions builds one Gson per partition instead of one per
            // record, and avoids the raw-type Map of the original.
            JavaRDD<Row> rowJavaRDD = javaRDD.mapPartitions(records -> {
                Gson gson = new Gson();
                StructField[] fields = userSchema.fields();
                List<Row> rows = new ArrayList<>();
                while (records.hasNext()) {
                    // Gson returns a raw LinkedTreeMap for Map.class; read it
                    // through a wildcard view to keep the access type-safe.
                    Map<?, ?> json = gson.fromJson(records.next(), Map.class);
                    Object[] values = new Object[fields.length];
                    for (int i = 0; i < fields.length; i++) {
                        values[i] = json.get(fields[i].name());
                    }
                    rows.add(RowFactory.create(values));
                }
                return rows.iterator();
            });

            // This closure runs on the driver; getOrCreate reuses one session
            // across batches rather than building a new one each time.
            SparkSession spark = SparkSession
                    .builder().config(sparkConf)
                    .getOrCreate();

            spark.createDataFrame(rowJavaRDD, userSchema).write().mode(SaveMode.Append).parquet("/data/person.parquet");
        });

        jssc.start();
        jssc.awaitTermination();
    }
}
