package com.example.dobs.demo.flink.io;

import com.example.dobs.demo.flink.io.flatten.BookParser;
import com.example.dobs.demo.flink.io.flatten.FlattenedBook;
import com.example.dobs.demo.flink.io.tool.ProtoMessageSerializer;
import com.google.protobuf.Message;
import com.twitter.chill.protobuf.ProtobufSerializer;
import mypackage.Message.Book;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.avro.ParquetAvroWriters;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.List;
import java.util.Properties;

/**
 * Flink job that consumes protobuf {@code Book} messages from Kafka, flattens
 * each message into {@link FlattenedBook} records, and writes them as Parquet
 * files to the local filesystem via a bulk-format {@link StreamingFileSink}.
 *
 * <p>Usage: an optional first CLI argument overrides the Kafka bootstrap
 * servers; otherwise the built-in default address is used.
 */
public class FlinkKafkaToLocal4BulkFormat {
    /** Hourly bucket-path pattern for the {@link DateTimeBucketAssigner}. */
    public static final String FORMAT_STRING = "yyyyMMddHH";

    /** Default Kafka bootstrap servers, used when no CLI argument is given. */
    private static final String DEFAULT_BOOTSTRAP_SERVERS = "192.168.31.82:9092";

    /** Checkpoint interval (ms); part files are finalized on each checkpoint. */
    private static final long CHECKPOINT_INTERVAL_MS = 60 * 1000L;

    private FlinkKafkaToLocal4BulkFormat() {
        // Entry-point class only; not meant to be instantiated.
    }

    public static void main(String[] args) throws Exception {
        // Create a local execution environment with the web UI for debugging.
        StreamExecutionEnvironment see =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        // Checkpointing is required: OnCheckpointRollingPolicy only rolls
        // (and thus finalizes) part files when a checkpoint completes.
        see.enableCheckpointing(CHECKPOINT_INTERVAL_MS);

        // Kafka configuration; bootstrap servers may be overridden by args[0].
        String bootstrapServers = args.length > 0 ? args[0] : DEFAULT_BOOTSTRAP_SERVERS;
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", bootstrapServers);
        properties.setProperty("group.id", "flink-group");
        // Protobuf messages are not POJOs; register a Kryo serializer for Book
        // so Flink can (de)serialize them between operators.
        see.getConfig().registerTypeWithKryoSerializer(Book.class, ProtobufSerializer.class);

        // Kafka consumer that deserializes raw bytes into protobuf Book messages.
        FlinkKafkaConsumer<Message> consumer = new FlinkKafkaConsumer<>(
                "book_topic", new ProtoMessageSerializer(Book.class), properties);
        consumer.setStartFromEarliest();

        // Read from Kafka with a single-task source.
        DataStream<Message> stream = see.addSource(consumer).setParallelism(1);

        // Flatten each nested Book message into zero or more flat records.
        // NOTE(review): BookParser is assumed stateless/serializable since it is
        // captured by the lambda — confirm against its implementation.
        BookParser parser = new BookParser();
        SingleOutputStreamOperator<FlattenedBook> streamOperator = stream.flatMap(
                (FlatMapFunction<Message, FlattenedBook>) (value, out) -> {
                    // "record" is a restricted identifier since Java 16; avoid it.
                    for (FlattenedBook flattened : parser.parse(value)) {
                        out.collect(flattened);
                    }
                }
        ).returns(FlattenedBook.class).setParallelism(1);

        // Output location and custom part-file name prefix.
        String outputPath = "file:///opt/develop/tmp/output";
        String fileNamePrefix = "my-20250408-prefix";

        // Bulk formats such as Parquet must roll on checkpoint; size/time-based
        // rolling policies are not supported for bulk encoders.
        OnCheckpointRollingPolicy<FlattenedBook, String> rollingPolicy =
                OnCheckpointRollingPolicy.build();
        final StreamingFileSink<FlattenedBook> sink = StreamingFileSink
                .forBulkFormat(new Path(outputPath), ParquetAvroWriters.forReflectRecord(FlattenedBook.class))
                .withBucketAssigner(new DateTimeBucketAssigner<>(FORMAT_STRING))
                .withRollingPolicy(rollingPolicy)
                .withBucketCheckInterval(60 * 1000L) // bucket-inactivity check; default is 1 minute
                .withOutputFileConfig(OutputFileConfig.builder()
                        .withPartPrefix(fileNamePrefix)
                        .build())
                .build();

        // Write the flattened stream to the local filesystem.
        streamOperator.addSink(sink).setParallelism(1).name("FileSink").uid("FileSink");
        System.out.println("Sink 已添加，开始执行任务");
        // Launch the job.
        see.execute("Flink Kafka to Local");
    }
}