package org.example.flink;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.ParquetRowInputFormat;
import org.apache.flink.formats.parquet.ParquetWriterFactory;
import org.apache.flink.formats.parquet.avro.ParquetAvroWriters;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.types.Record;
import org.apache.flink.types.StringValue;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE;

public class KafkaDemo {

    // ObjectMapper is thread-safe after configuration and expensive to build;
    // share one instance instead of allocating a new one per Kafka record.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    /**
     * Streams JSON strings from the Kafka topic {@code "topic"} (broker at
     * {@code localhost:9092}), validates/parses each message, and writes the raw
     * messages to {@code /data/} as Parquet files bucketed by calendar day.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint every 5 s so Kafka offsets and in-progress Parquet part
        // files are committed/rolled on a consistent schedule.
        env.enableCheckpointing(5000);

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");

        // Removed the previous env.readFile(ParquetRowInputFormat, ...) call:
        // it built an empty nested MessageType (an invalid Parquet schema) and
        // its resulting stream was never consumed.

        DataStream<String> stream = env
                .addSource(new FlinkKafkaConsumer<>("topic", new SimpleStringSchema(), properties));

        // Parse each message into a Record carrying the raw payload in field 0.
        // The original wrote an empty StringValue to index 1 (leaving field 0
        // unset) and discarded both the parsed map and the mapped stream.
        // NOTE(review): this stream is still not connected to any sink; attach
        // one or remove the transformation if it is not needed.
        DataStream<Record> records = stream.map((MapFunction<String, Record>) value -> {
            // Parse purely to validate that the message is a flat JSON object;
            // an invalid message fails the record instead of passing through.
            OBJECT_MAPPER.readValue(value, new TypeReference<Map<String, String>>() {});
            Record record = new Record();
            record.setField(0, new StringValue(value));
            return record;
        });

        // NOTE(review): Avro reflection over java.lang.String yields a schema
        // with no usable record fields, so the written rows may be empty —
        // consider a dedicated POJO/Avro type instead. TODO confirm.
        ParquetWriterFactory<String> writerFactory = ParquetAvroWriters.forReflectRecord(String.class);
        StreamingFileSink<String> streamingFileSink = StreamingFileSink.forBulkFormat(new Path("/data/"), writerFactory)
                // DateTimeBucketAssigner expects a date-format PATTERN string.
                // ISO_LOCAL_DATE.toString() returns the formatter's internal
                // debug representation (not "yyyy-MM-dd") and fails at runtime,
                // so pass the pattern literal directly.
                .withBucketAssigner(new DateTimeBucketAssigner<>("yyyy-MM-dd"))
                .build();
        stream.addSink(streamingFileSink);

        env.execute("KafkaDemo");
    }
}
