package org.mcclone.jr.spark;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;
import org.apache.spark.sql.streaming.StreamingQuery;
import org.apache.spark.sql.streaming.StreamingQueryException;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

/**
 * Structured Streaming job: consumes JSON messages from the Kafka topic
 * {@code my_tpc}, converts each message into a {@link Row}, and appends the
 * rows to a parquet sink at {@code /data/person.parquet}.
 *
 * <p>Runs until externally terminated; {@code main} blocks on
 * {@link StreamingQuery#awaitTermination()}.
 */
public class SparkStreamForKafka {

    // ObjectMapper is expensive to construct and thread-safe once configured,
    // so share a single instance per executor JVM instead of allocating one
    // for every incoming record inside the map function.
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // A Dataset<Row> must have exactly one fixed schema. The previous
    // implementation built a schema from each record's JSON keys (unstable
    // column order, varies per record) and used Encoders.bean(Row.class),
    // which fails at runtime because Row is an interface, not a bean.
    // NOTE(review): the field list below is an assumption — replace it with
    // the actual keys produced on the "my_tpc" topic; they cannot be
    // determined from this file.
    private static final StructType SCHEMA = DataTypes.createStructType(new StructField[]{
            DataTypes.createStructField("type", DataTypes.StringType, true),
            DataTypes.createStructField("name", DataTypes.StringType, true),
    });

    public static void main(String[] args) throws StreamingQueryException {

        SparkSession spark = SparkSession
                .builder()
                .appName("JavaStructuredKafkaWordCount")
                .getOrCreate();
        spark.conf().set("spark.sql.streaming.metricsEnabled", "false");

        // Stream of raw JSON strings: one element per Kafka record value.
        Dataset<String> lines = spark
                .readStream()
                .format("kafka")
                .option("startingOffsets", "earliest")
                .option("kafka.bootstrap.servers", "ha:9092")
                .option("subscribe", "my_tpc")
                .load()
                .selectExpr("CAST(value AS STRING)")
                .as(Encoders.STRING());

        // Encoders.row requires Spark 3.5+; on older versions use
        // org.apache.spark.sql.catalyst.encoders.RowEncoder.apply(SCHEMA).
        Dataset<Row> rowDataset = lines.map(
                (MapFunction<String, Row>) SparkStreamForKafka::jsonToRow,
                Encoders.row(SCHEMA));

        // Append-only parquet sink; checkpointing makes the query restartable.
        StreamingQuery streamingQuery = rowDataset.writeStream()
                .outputMode("append")
                .format("parquet")
                .option("path", "/data/person.parquet")
                .option("checkpointLocation", "/data/checkpoint3")
                .start();

        streamingQuery.awaitTermination();
    }

    /**
     * Parses one JSON message into a {@link Row} matching {@link #SCHEMA}.
     * Keys absent from the message become {@code null}; extra keys are ignored.
     *
     * @param json a flat JSON object whose values are strings
     * @return a row whose columns follow the order of {@link #SCHEMA}
     * @throws Exception if the payload is not valid JSON of the expected shape
     */
    private static Row jsonToRow(String json) throws Exception {
        Map<String, String> fields = MAPPER.readValue(json, new TypeReference<Map<String, String>>() {
        });
        StructField[] columns = SCHEMA.fields();
        Object[] values = new Object[columns.length];
        for (int i = 0; i < columns.length; i++) {
            values[i] = fields.get(columns[i].name());
        }
        return new GenericRowWithSchema(values, SCHEMA);
    }
}
