package com.abyss.sink.stream;

import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.BasePathBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

/**
 * Demo of Flink's StreamingFileSink: streams lines from a socket into rolling
 * files, rolling part files on each checkpoint for exactly-once file output.
 */
public class StreamingFileSinkDemo {
    /**
     * Runs the socket-to-file streaming demo.
     *
     * <p>Reads text lines from a socket source and writes them to part files
     * under {@code data/output/streamingoutput}, finalizing (rolling) a part
     * file on every checkpoint so the output is consistent with exactly-once
     * checkpointing.
     *
     * @param args optional overrides: {@code args[0]} = socket host
     *             (default {@code "node1"}), {@code args[1]} = socket port
     *             (default {@code 9999})
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        // Generalization: host/port may be supplied on the command line;
        // the defaults preserve the original hard-coded behavior.
        final String host = args.length > 0 ? args[0] : "node1";
        final int port = args.length > 1 ? Integer.parseInt(args[1]) : 9999;

        // 1. Execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 1.1 Enable checkpointing (every 5s, exactly-once). Required:
        // OnCheckpointRollingPolicy below only finalizes part files when a
        // checkpoint completes, so without checkpointing no file would ever
        // leave the in-progress state.
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        env.setStateBackend(new FsStateBackend("file:///Users/abyss/Dev/toys/flink/H-flink-learn/src/main/resources/checkpoint"));

        // 2. Socket source
        DataStreamSource<String> socketTextStream = env.socketTextStream(host, port);

        // 3. Build the StreamingFileSink
        StreamingFileSink<String> streamingFileSink = StreamingFileSink.forRowFormat(
                new Path("data/output/streamingoutput"),
                // Encoder that serializes each record line-by-line as UTF-8 text
                new SimpleStringEncoder<String>("UTF-8"))
                // Bucket assignment: write everything directly under the base path
                .withBucketAssigner(new BasePathBucketAssigner<>())
                // Rolling policy: complete a part file on every checkpoint
                .withRollingPolicy(OnCheckpointRollingPolicy.build())
                .build();

        // 4. Attach the sink to the stream
        socketTextStream.addSink(streamingFileSink);

        // 5. Execute with an explicit job name (easier to find in the Flink UI
        // than the anonymous default).
        env.execute("StreamingFileSinkDemo");
    }
}