package com.example.dobs.demo.flink.io;

import com.example.dobs.demo.flink.io.tool.MessageEncoder;
import com.example.dobs.demo.flink.io.tool.ProtoMessageSerializer;
import com.google.protobuf.Message;
import com.twitter.chill.protobuf.ProtobufSerializer;
import mypackage.Message.Book;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.RollingPolicy;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Local-mode Flink job: consumes protobuf {@code Book} messages from the Kafka
 * topic {@code book_topic} and writes them to the local filesystem via a
 * {@link StreamingFileSink}, bucketed by hour.
 *
 * <p>Usage: {@code FlinkKafkaToLocal [bootstrapServers] [outputPath]} — both
 * arguments are optional and default to the original hard-coded values.
 */
public class FlinkKafkaToLocal {

    /** Default Kafka broker address; override with {@code args[0]}. */
    private static final String DEFAULT_BOOTSTRAP_SERVERS = "192.168.31.82:9092";

    /** Default local output directory; override with {@code args[1]}. */
    private static final String DEFAULT_OUTPUT_PATH = "file:///Users/shareit/develop/tmp/output";

    public static void main(String[] args) throws Exception {
        // Allow overriding broker list and output path from the command line
        // while keeping the previous hard-coded values as defaults.
        final String bootstrapServers = args.length > 0 ? args[0] : DEFAULT_BOOTSTRAP_SERVERS;
        final String outputPath = args.length > 1 ? args[1] : DEFAULT_OUTPUT_PATH;

        // Create a local execution environment with the Flink Web UI enabled.
        StreamExecutionEnvironment see = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        // StreamingFileSink only commits (finalizes) part files on checkpoint
        // completion, so checkpointing is mandatory for this job; every 30s.
        see.enableCheckpointing(30_000L);

        // Kafka consumer configuration.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", bootstrapServers);
        properties.setProperty("group.id", "flink-group");

        // Protobuf messages are not POJOs; register the chill-protobuf Kryo
        // serializer so Flink does not fall back to slow reflective Kryo.
        see.getConfig().registerTypeWithKryoSerializer(Book.class, ProtobufSerializer.class);

        // Kafka source: deserializes "book_topic" records into Book messages.
        FlinkKafkaConsumer<Message> consumer =
                new FlinkKafkaConsumer<>("book_topic", new ProtoMessageSerializer(Book.class), properties);
        consumer.setStartFromEarliest();
        DataStream<Message> stream = see.addSource(consumer).setParallelism(1);

        // Part-file name prefix for the sink's output files.
        String fileNamePrefix = "my-prefix";

        // Roll part files at 1 MiB, or after 5 minutes of age/inactivity.
        // BUG FIX: the original passed TimeUnit.MINUTES.toMillis(1000L * 300),
        // i.e. 300,000 MINUTES (~208 days) — a double unit conversion that
        // effectively disabled time-based rolling. "1000L * 300" was clearly
        // meant as 300 seconds in millis; express that intent directly.
        final RollingPolicy<Message, String> rollingPolicy = DefaultRollingPolicy.builder()
                .withMaxPartSize(1024L * 1024L)
                .withRolloverInterval(TimeUnit.SECONDS.toMillis(300L))
                .withInactivityInterval(TimeUnit.SECONDS.toMillis(300L))
                .build();

        // Row-format file sink, bucketed by hour (e.g. .../2024/01/31/13/).
        StreamingFileSink<Message> sink = StreamingFileSink
                .forRowFormat(new Path(outputPath), new MessageEncoder())
                .withBucketAssigner(new DateTimeBucketAssigner<>("yyyy/MM/dd/HH"))
                .withRollingPolicy(rollingPolicy)
                .withOutputFileConfig(
                        org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig
                                .builder()
                                .withPartPrefix(fileNamePrefix)
                                .build())
                .build();

        // Write the stream to the local filesystem.
        stream.addSink(sink).setParallelism(2).name("FileSink").uid("FileSink");
        System.out.println("Sink 已添加，开始执行任务");

        // Launch the job.
        see.execute("Flink Kafka to Local");
    }
}